dccp: Add check for sequence number in ICMPv6 message
[GitHub/mt8127/android_kernel_alcatel_ttab.git] net/dccp/ipv6.c
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/xfrm.h>
18
19 #include <net/addrconf.h>
20 #include <net/inet_common.h>
21 #include <net/inet_hashtables.h>
22 #include <net/inet_sock.h>
23 #include <net/inet6_connection_sock.h>
24 #include <net/inet6_hashtables.h>
25 #include <net/ip6_route.h>
26 #include <net/ipv6.h>
27 #include <net/protocol.h>
28 #include <net/transp_v6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/xfrm.h>
31
32 #include "dccp.h"
33 #include "ipv6.h"
34 #include "feat.h"
35
36 /* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
37
38 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
39 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
40
41 static void dccp_v6_hash(struct sock *sk)
42 {
43 if (sk->sk_state != DCCP_CLOSED) {
44 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
45 inet_hash(sk);
46 return;
47 }
48 local_bh_disable();
49 __inet6_hash(sk);
50 local_bh_enable();
51 }
52 }
53
54 /* add pseudo-header to DCCP checksum stored in skb->csum */
55 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
56 struct in6_addr *saddr,
57 struct in6_addr *daddr)
58 {
59 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
60 }
61
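/*
 * Outgoing checksum flow, as used by dccp_v6_send_check() below (a sketch,
 * assuming dccp_csum_outgoing() behaves as defined in net/dccp/dccp.h):
 * dccp_csum_outgoing() accumulates a checksum over the DCCP header plus the
 * part of the payload selected by the checksum coverage (CsCov) into
 * skb->csum, and dccp_v6_csum_finish() then folds in the IPv6 pseudo-header
 * via csum_ipv6_magic() to produce the final dccph_checksum value.
 */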
62 static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
63 struct sk_buff *skb)
64 {
65 struct ipv6_pinfo *np = inet6_sk(sk);
66 struct dccp_hdr *dh = dccp_hdr(skb);
67
68 dccp_csum_outgoing(skb);
69 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
70 }
71
72 static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
73 __be16 sport, __be16 dport )
74 {
75 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
76 }
77
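/*
 * Note the argument order in dccp_v6_init_sequence() below: the ISS is
 * generated for the Response we will send, so the incoming packet's
 * destination address/port (our local endpoint) are passed as the "source"
 * side of the secure hash and its source address/port as the "destination".
 */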
78 static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
79 {
80 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
81 ipv6_hdr(skb)->saddr.s6_addr32,
82 dccp_hdr(skb)->dccph_dport,
83 dccp_hdr(skb)->dccph_sport );
84
85 }
86
87 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
88 int type, int code, int offset, __be32 info)
89 {
90 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
91 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
92 struct dccp_sock *dp;
93 struct ipv6_pinfo *np;
94 struct sock *sk;
95 int err;
96 __u64 seq;
97 struct net *net = dev_net(skb->dev);
98
99 sk = inet6_lookup(net, &dccp_hashinfo,
100 &hdr->daddr, dh->dccph_dport,
101 &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
102
103 if (sk == NULL) {
104 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
105 return;
106 }
107
108 if (sk->sk_state == DCCP_TIME_WAIT) {
109 inet_twsk_put(inet_twsk(sk));
110 return;
111 }
112
113 bh_lock_sock(sk);
114 if (sock_owned_by_user(sk))
115 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
116
117 if (sk->sk_state == DCCP_CLOSED)
118 goto out;
119
120 dp = dccp_sk(sk);
121 seq = dccp_hdr_seq(dh);
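/*
 * This is the check the patch title refers to: the sequence number quoted
 * back in the ICMPv6 payload must lie inside the local acknowledgement
 * window [dccps_awl, dccps_awh] before the error is acted upon (sockets
 * still in REQUESTING or LISTEN are exempt, since no window exists yet).
 * between48() is the 48-bit circular window test from dccp.h, roughly
 * awl <= seq <= awh modulo 2^48; anything outside is counted as
 * LINUX_MIB_OUTOFWINDOWICMPS and ignored, which helps keep blindly spoofed
 * ICMPv6 errors from disturbing the connection.
 */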
122 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
123 !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
124 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
125 goto out;
126 }
127
128 np = inet6_sk(sk);
129
130 if (type == ICMPV6_PKT_TOOBIG) {
131 struct dst_entry *dst = NULL;
132
133 if (sock_owned_by_user(sk))
134 goto out;
135 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
136 goto out;
137
138 /* icmp should have updated the destination cache entry */
139 dst = __sk_dst_check(sk, np->dst_cookie);
140 if (dst == NULL) {
141 struct inet_sock *inet = inet_sk(sk);
142 struct flowi fl;
143
144 /* BUGGG_FUTURE: Again, it is not clear how
145 to handle rthdr case. Ignore this complexity
146 for now.
147 */
148 memset(&fl, 0, sizeof(fl));
149 fl.proto = IPPROTO_DCCP;
150 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
151 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
152 fl.oif = sk->sk_bound_dev_if;
153 fl.fl_ip_dport = inet->dport;
154 fl.fl_ip_sport = inet->sport;
155 security_sk_classify_flow(sk, &fl);
156
157 err = ip6_dst_lookup(sk, &dst, &fl);
158 if (err) {
159 sk->sk_err_soft = -err;
160 goto out;
161 }
162
163 err = xfrm_lookup(&dst, &fl, sk, 0);
164 if (err < 0) {
165 sk->sk_err_soft = -err;
166 goto out;
167 }
168 } else
169 dst_hold(dst);
170
171 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
172 dccp_sync_mss(sk, dst_mtu(dst));
173 } /* else let the usual retransmit timer handle it */
174 dst_release(dst);
175 goto out;
176 }
177
178 icmpv6_err_convert(type, code, &err);
179
180 /* Might be for a request_sock */
181 switch (sk->sk_state) {
182 struct request_sock *req, **prev;
183 case DCCP_LISTEN:
184 if (sock_owned_by_user(sk))
185 goto out;
186
187 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
188 &hdr->daddr, &hdr->saddr,
189 inet6_iif(skb));
190 if (req == NULL)
191 goto out;
192
193 /*
194 * ICMPs are not backlogged, hence we cannot get an established
195 * socket here.
196 */
197 WARN_ON(req->sk != NULL);
198
199 if (seq != dccp_rsk(req)->dreq_iss) {
200 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
201 goto out;
202 }
203
204 inet_csk_reqsk_queue_drop(sk, req, prev);
205 goto out;
206
207 case DCCP_REQUESTING:
208 case DCCP_RESPOND: /* Cannot happen.
209 It can, if SYNs are crossed. --ANK */
210 if (!sock_owned_by_user(sk)) {
211 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
212 sk->sk_err = err;
213 /*
214 * Wake people up to see the error
215 * (see connect in sock.c)
216 */
217 sk->sk_error_report(sk);
218 dccp_done(sk);
219 } else
220 sk->sk_err_soft = err;
221 goto out;
222 }
223
224 if (!sock_owned_by_user(sk) && np->recverr) {
225 sk->sk_err = err;
226 sk->sk_error_report(sk);
227 } else
228 sk->sk_err_soft = err;
229
230 out:
231 bh_unlock_sock(sk);
232 sock_put(sk);
233 }
234
235
236 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
237 {
238 struct inet6_request_sock *ireq6 = inet6_rsk(req);
239 struct ipv6_pinfo *np = inet6_sk(sk);
240 struct sk_buff *skb;
241 struct ipv6_txoptions *opt = NULL;
242 struct in6_addr *final_p = NULL, final;
243 struct flowi fl;
244 int err = -1;
245 struct dst_entry *dst;
246
247 memset(&fl, 0, sizeof(fl));
248 fl.proto = IPPROTO_DCCP;
249 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
250 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
251 fl.fl6_flowlabel = 0;
252 fl.oif = ireq6->iif;
253 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
254 fl.fl_ip_sport = inet_sk(sk)->sport;
255 security_req_classify_flow(req, &fl);
256
257 opt = np->opt;
258
259 if (opt != NULL && opt->srcrt != NULL) {
260 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
261
262 ipv6_addr_copy(&final, &fl.fl6_dst);
263 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
264 final_p = &final;
265 }
266
267 err = ip6_dst_lookup(sk, &dst, &fl);
268 if (err)
269 goto done;
270
271 if (final_p)
272 ipv6_addr_copy(&fl.fl6_dst, final_p);
273
274 err = xfrm_lookup(&dst, &fl, sk, 0);
275 if (err < 0)
276 goto done;
277
278 skb = dccp_make_response(sk, dst, req);
279 if (skb != NULL) {
280 struct dccp_hdr *dh = dccp_hdr(skb);
281
282 dh->dccph_checksum = dccp_v6_csum_finish(skb,
283 &ireq6->loc_addr,
284 &ireq6->rmt_addr);
285 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
286 err = ip6_xmit(sk, skb, &fl, opt, 0);
287 err = net_xmit_eval(err);
288 }
289
290 done:
291 if (opt != NULL && opt != np->opt)
292 sock_kfree_s(sk, opt, opt->tot_len);
293 dst_release(dst);
294 return err;
295 }
296
297 static void dccp_v6_reqsk_destructor(struct request_sock *req)
298 {
299 if (inet6_rsk(req)->pktopts != NULL)
300 kfree_skb(inet6_rsk(req)->pktopts);
301 }
302
303 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
304 {
305 struct ipv6hdr *rxip6h;
306 struct sk_buff *skb;
307 struct flowi fl;
308 struct net *net = dev_net(rxskb->dst->dev);
309 struct sock *ctl_sk = net->dccp.v6_ctl_sk;
310
311 if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
312 return;
313
314 if (!ipv6_unicast_destination(rxskb))
315 return;
316
317 skb = dccp_ctl_make_reset(ctl_sk, rxskb);
318 if (skb == NULL)
319 return;
320
321 rxip6h = ipv6_hdr(rxskb);
322 dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
323 &rxip6h->daddr);
324
325 memset(&fl, 0, sizeof(fl));
326 ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
327 ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);
328
329 fl.proto = IPPROTO_DCCP;
330 fl.oif = inet6_iif(rxskb);
331 fl.fl_ip_dport = dccp_hdr(skb)->dccph_dport;
332 fl.fl_ip_sport = dccp_hdr(skb)->dccph_sport;
333 security_skb_classify_flow(rxskb, &fl);
334
335 /* sk = NULL, but it is safe for now. RST socket required. */
336 if (!ip6_dst_lookup(ctl_sk, &skb->dst, &fl)) {
337 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
338 ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
339 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
340 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
341 return;
342 }
343 }
344
345 kfree_skb(skb);
346 }
347
348 static struct request_sock_ops dccp6_request_sock_ops = {
349 .family = AF_INET6,
350 .obj_size = sizeof(struct dccp6_request_sock),
351 .rtx_syn_ack = dccp_v6_send_response,
352 .send_ack = dccp_reqsk_send_ack,
353 .destructor = dccp_v6_reqsk_destructor,
354 .send_reset = dccp_v6_ctl_send_reset,
355 };
356
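/*
 * Lookup helper for packets that arrive on a listening socket: first try to
 * match a pending connection request for this four-tuple, then look for an
 * already established socket (returned locked, unless it is in TIME_WAIT),
 * and otherwise fall back to the listening socket itself so that normal
 * LISTEN-state processing can take place.
 */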
357 static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
358 {
359 const struct dccp_hdr *dh = dccp_hdr(skb);
360 const struct ipv6hdr *iph = ipv6_hdr(skb);
361 struct sock *nsk;
362 struct request_sock **prev;
363 /* Find possible connection requests. */
364 struct request_sock *req = inet6_csk_search_req(sk, &prev,
365 dh->dccph_sport,
366 &iph->saddr,
367 &iph->daddr,
368 inet6_iif(skb));
369 if (req != NULL)
370 return dccp_check_req(sk, skb, req, prev);
371
372 nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
373 &iph->saddr, dh->dccph_sport,
374 &iph->daddr, ntohs(dh->dccph_dport),
375 inet6_iif(skb));
376 if (nsk != NULL) {
377 if (nsk->sk_state != DCCP_TIME_WAIT) {
378 bh_lock_sock(nsk);
379 return nsk;
380 }
381 inet_twsk_put(inet_twsk(nsk));
382 return NULL;
383 }
384
385 return sk;
386 }
387
388 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
389 {
390 struct request_sock *req;
391 struct dccp_request_sock *dreq;
392 struct inet6_request_sock *ireq6;
393 struct ipv6_pinfo *np = inet6_sk(sk);
394 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
395 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
396
397 if (skb->protocol == htons(ETH_P_IP))
398 return dccp_v4_conn_request(sk, skb);
399
400 if (!ipv6_unicast_destination(skb))
401 return 0; /* discard, don't send a reset here */
402
403 if (dccp_bad_service_code(sk, service)) {
404 dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
405 goto drop;
406 }
407 /*
408 * There are no SYN attacks on IPv6, yet...
409 */
410 dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
411 if (inet_csk_reqsk_queue_is_full(sk))
412 goto drop;
413
414 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
415 goto drop;
416
417 req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
418 if (req == NULL)
419 goto drop;
420
421 dccp_reqsk_init(req, skb);
422
423 dreq = dccp_rsk(req);
424 if (dccp_parse_options(sk, dreq, skb))
425 goto drop_and_free;
426
427 if (security_inet_conn_request(sk, skb, req))
428 goto drop_and_free;
429
430 ireq6 = inet6_rsk(req);
431 ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
432 ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
433
434 if (ipv6_opt_accepted(sk, skb) ||
435 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
436 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
437 atomic_inc(&skb->users);
438 ireq6->pktopts = skb;
439 }
440 ireq6->iif = sk->sk_bound_dev_if;
441
442 /* So that link locals have meaning */
443 if (!sk->sk_bound_dev_if &&
444 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
445 ireq6->iif = inet6_iif(skb);
446
447 /*
448 * Step 3: Process LISTEN state
449 *
450 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
451 *
452 * In fact we defer setting S.GSR, S.SWL, S.SWH to
453 * dccp_create_openreq_child.
454 */
455 dreq->dreq_isr = dcb->dccpd_seq;
456 dreq->dreq_iss = dccp_v6_init_sequence(skb);
457 dreq->dreq_service = service;
458
459 if (dccp_v6_send_response(sk, req))
460 goto drop_and_free;
461
462 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
463 return 0;
464
465 drop_and_free:
466 reqsk_free(req);
467 drop:
468 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
469 return -1;
470 }
471
472 static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
473 struct sk_buff *skb,
474 struct request_sock *req,
475 struct dst_entry *dst)
476 {
477 struct inet6_request_sock *ireq6 = inet6_rsk(req);
478 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
479 struct inet_sock *newinet;
480 struct dccp_sock *newdp;
481 struct dccp6_sock *newdp6;
482 struct sock *newsk;
483 struct ipv6_txoptions *opt;
484
485 if (skb->protocol == htons(ETH_P_IP)) {
486 /*
487 * v6 mapped
488 */
489 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
490 if (newsk == NULL)
491 return NULL;
492
493 newdp6 = (struct dccp6_sock *)newsk;
494 newdp = dccp_sk(newsk);
495 newinet = inet_sk(newsk);
496 newinet->pinet6 = &newdp6->inet6;
497 newnp = inet6_sk(newsk);
498
499 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
500
501 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
502 newinet->daddr);
503
504 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
505 newinet->saddr);
506
507 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
508
509 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
510 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
511 newnp->pktoptions = NULL;
512 newnp->opt = NULL;
513 newnp->mcast_oif = inet6_iif(skb);
514 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
515
516 /*
517 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
518 * here, dccp_create_openreq_child now does this for us, see the comment in
519 * that function for the gory details. -acme
520 */
521
522 /* This is a tricky place. Until this moment the IPv4-mapped socket
523 worked with the IPv6 icsk.icsk_af_ops.
524 Sync it now.
525 */
526 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
527
528 return newsk;
529 }
530
531 opt = np->opt;
532
533 if (sk_acceptq_is_full(sk))
534 goto out_overflow;
535
536 if (dst == NULL) {
537 struct in6_addr *final_p = NULL, final;
538 struct flowi fl;
539
540 memset(&fl, 0, sizeof(fl));
541 fl.proto = IPPROTO_DCCP;
542 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
543 if (opt != NULL && opt->srcrt != NULL) {
544 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
545
546 ipv6_addr_copy(&final, &fl.fl6_dst);
547 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
548 final_p = &final;
549 }
550 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
551 fl.oif = sk->sk_bound_dev_if;
552 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
553 fl.fl_ip_sport = inet_sk(sk)->sport;
554 security_sk_classify_flow(sk, &fl);
555
556 if (ip6_dst_lookup(sk, &dst, &fl))
557 goto out;
558
559 if (final_p)
560 ipv6_addr_copy(&fl.fl6_dst, final_p);
561
562 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
563 goto out;
564 }
565
566 newsk = dccp_create_openreq_child(sk, req, skb);
567 if (newsk == NULL)
568 goto out;
569
570 /*
571 * No need to charge this sock to the relevant IPv6 refcnt debug socks
572 * count here, dccp_create_openreq_child now does this for us, see the
573 * comment in that function for the gory details. -acme
574 */
575
576 __ip6_dst_store(newsk, dst, NULL, NULL);
577 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
578 NETIF_F_TSO);
579 newdp6 = (struct dccp6_sock *)newsk;
580 newinet = inet_sk(newsk);
581 newinet->pinet6 = &newdp6->inet6;
582 newdp = dccp_sk(newsk);
583 newnp = inet6_sk(newsk);
584
585 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
586
587 ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
588 ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
589 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
590 newsk->sk_bound_dev_if = ireq6->iif;
591
592 /* Now IPv6 options...
593
594 First: no IPv4 options.
595 */
596 newinet->opt = NULL;
597
598 /* Clone RX bits */
599 newnp->rxopt.all = np->rxopt.all;
600
601 /* Clone pktoptions received with SYN */
602 newnp->pktoptions = NULL;
603 if (ireq6->pktopts != NULL) {
604 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
605 kfree_skb(ireq6->pktopts);
606 ireq6->pktopts = NULL;
607 if (newnp->pktoptions)
608 skb_set_owner_r(newnp->pktoptions, newsk);
609 }
610 newnp->opt = NULL;
611 newnp->mcast_oif = inet6_iif(skb);
612 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
613
614 /*
615 * Clone native IPv6 options from listening socket (if any)
616 *
617 * Yes, keeping a reference count would be much more clever, but we do
618 * one more thing here: reattach the optmem to newsk.
619 */
620 if (opt != NULL) {
621 newnp->opt = ipv6_dup_options(newsk, opt);
622 if (opt != np->opt)
623 sock_kfree_s(sk, opt, opt->tot_len);
624 }
625
626 inet_csk(newsk)->icsk_ext_hdr_len = 0;
627 if (newnp->opt != NULL)
628 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
629 newnp->opt->opt_flen);
630
631 dccp_sync_mss(newsk, dst_mtu(dst));
632
633 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
634
635 __inet6_hash(newsk);
636 __inet_inherit_port(sk, newsk);
637
638 return newsk;
639
640 out_overflow:
641 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
642 out:
643 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
644 if (opt != NULL && opt != np->opt)
645 sock_kfree_s(sk, opt, opt->tot_len);
646 dst_release(dst);
647 return NULL;
648 }
649
650 /* The socket must have its spinlock held when we get
651 * here.
652 *
653 * We have a potential double-lock case here, so even when
654 * doing backlog processing we use the BH locking scheme.
655 * This is because we cannot sleep with the original spinlock
656 * held.
657 */
658 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
659 {
660 struct ipv6_pinfo *np = inet6_sk(sk);
661 struct sk_buff *opt_skb = NULL;
662
663 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
664 goes to the IPv4 receive handler and is backlogged.
665 From the backlog it always ends up here. Kerboom...
666 Fortunately, dccp_rcv_established and rcv_established
667 handle them correctly, but that is not the case with
668 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
669 */
670
671 if (skb->protocol == htons(ETH_P_IP))
672 return dccp_v4_do_rcv(sk, skb);
673
674 if (sk_filter(sk, skb))
675 goto discard;
676
677 /*
678 * socket locking is here for SMP purposes as backlog rcv is currently
679 * called with bh processing disabled.
680 */
681
682 /* Do Stevens' IPV6_PKTOPTIONS.
683
684 Yes, guys, this is the only place in our code where we
685 may make it not affect IPv4.
686 The rest of the code is protocol independent,
687 and I do not like the idea of uglifying IPv4.
688
689 Actually, the whole idea behind IPV6_PKTOPTIONS
690 looks not very well thought out. For now we latch the
691 options received in the last packet, as enqueued
692 by tcp. Feel free to propose a better solution.
693 --ANK (980728)
694 */
695 if (np->rxopt.all)
696 /*
697 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
698 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
699 */
700 opt_skb = skb_clone(skb, GFP_ATOMIC);
701
702 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
703 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
704 goto reset;
705 if (opt_skb) {
706 /* XXX This is where we would goto ipv6_pktoptions. */
707 __kfree_skb(opt_skb);
708 }
709 return 0;
710 }
711
712 /*
713 * Step 3: Process LISTEN state
714 * If S.state == LISTEN,
715 * If P.type == Request or P contains a valid Init Cookie option,
716 * (* Must scan the packet's options to check for Init
717 * Cookies. Only Init Cookies are processed here,
718 * however; other options are processed in Step 8. This
719 * scan need only be performed if the endpoint uses Init
720 * Cookies *)
721 * (* Generate a new socket and switch to that socket *)
722 * Set S := new socket for this port pair
723 * S.state = RESPOND
724 * Choose S.ISS (initial seqno) or set from Init Cookies
725 * Initialize S.GAR := S.ISS
726 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
727 * Continue with S.state == RESPOND
728 * (* A Response packet will be generated in Step 11 *)
729 * Otherwise,
730 * Generate Reset(No Connection) unless P.type == Reset
731 * Drop packet and return
732 *
733 * NOTE: the check for the packet types is done in
734 * dccp_rcv_state_process
735 */
736 if (sk->sk_state == DCCP_LISTEN) {
737 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
738
739 if (nsk == NULL)
740 goto discard;
741 /*
742 * Queue it on the new socket if the new socket is active,
743 * otherwise we just short-circuit this and continue with
744 * the new socket.
745 */
746 if (nsk != sk) {
747 if (dccp_child_process(sk, nsk, skb))
748 goto reset;
749 if (opt_skb != NULL)
750 __kfree_skb(opt_skb);
751 return 0;
752 }
753 }
754
755 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
756 goto reset;
757 if (opt_skb) {
758 /* XXX This is where we would goto ipv6_pktoptions. */
759 __kfree_skb(opt_skb);
760 }
761 return 0;
762
763 reset:
764 dccp_v6_ctl_send_reset(sk, skb);
765 discard:
766 if (opt_skb != NULL)
767 __kfree_skb(opt_skb);
768 kfree_skb(skb);
769 return 0;
770 }
771
772 static int dccp_v6_rcv(struct sk_buff *skb)
773 {
774 const struct dccp_hdr *dh;
775 struct sock *sk;
776 int min_cov;
777
778 /* Step 1: Check header basics */
779
780 if (dccp_invalid_packet(skb))
781 goto discard_it;
782
783 /* Step 1: If header checksum is incorrect, drop packet and return. */
784 if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
785 &ipv6_hdr(skb)->daddr)) {
786 DCCP_WARN("dropped packet with invalid checksum\n");
787 goto discard_it;
788 }
789
790 dh = dccp_hdr(skb);
791
792 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
793 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
794
795 if (dccp_packet_without_ack(skb))
796 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
797 else
798 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
799
800 /* Step 2:
801 * Look up flow ID in table and get corresponding socket */
802 sk = __inet6_lookup(dev_net(skb->dst->dev), &dccp_hashinfo,
803 &ipv6_hdr(skb)->saddr, dh->dccph_sport,
804 &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport),
805 inet6_iif(skb));
806 /*
807 * Step 2:
808 * If no socket ...
809 */
810 if (sk == NULL) {
811 dccp_pr_debug("failed to look up flow ID in table and "
812 "get corresponding socket\n");
813 goto no_dccp_socket;
814 }
815
816 /*
817 * Step 2:
818 * ... or S.state == TIMEWAIT,
819 * Generate Reset(No Connection) unless P.type == Reset
820 * Drop packet and return
821 */
822 if (sk->sk_state == DCCP_TIME_WAIT) {
823 dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
824 inet_twsk_put(inet_twsk(sk));
825 goto no_dccp_socket;
826 }
827
828 /*
829 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
830 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
831 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
832 */
833 min_cov = dccp_sk(sk)->dccps_pcrlen;
834 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
835 dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
836 dh->dccph_cscov, min_cov);
837 /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
838 goto discard_and_relse;
839 }
840
841 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
842 goto discard_and_relse;
843
844 return sk_receive_skb(sk, skb, 1) ? -1 : 0;
845
846 no_dccp_socket:
847 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
848 goto discard_it;
849 /*
850 * Step 2:
851 * If no socket ...
852 * Generate Reset(No Connection) unless P.type == Reset
853 * Drop packet and return
854 */
855 if (dh->dccph_type != DCCP_PKT_RESET) {
856 DCCP_SKB_CB(skb)->dccpd_reset_code =
857 DCCP_RESET_CODE_NO_CONNECTION;
858 dccp_v6_ctl_send_reset(sk, skb);
859 }
860
861 discard_it:
862 kfree_skb(skb);
863 return 0;
864
865 discard_and_relse:
866 sock_put(sk);
867 goto discard_it;
868 }
869
870 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
871 int addr_len)
872 {
873 struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
874 struct inet_connection_sock *icsk = inet_csk(sk);
875 struct inet_sock *inet = inet_sk(sk);
876 struct ipv6_pinfo *np = inet6_sk(sk);
877 struct dccp_sock *dp = dccp_sk(sk);
878 struct in6_addr *saddr = NULL, *final_p = NULL, final;
879 struct flowi fl;
880 struct dst_entry *dst;
881 int addr_type;
882 int err;
883
884 dp->dccps_role = DCCP_ROLE_CLIENT;
885
886 if (addr_len < SIN6_LEN_RFC2133)
887 return -EINVAL;
888
889 if (usin->sin6_family != AF_INET6)
890 return -EAFNOSUPPORT;
891
892 memset(&fl, 0, sizeof(fl));
893
894 if (np->sndflow) {
895 fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
896 IP6_ECN_flow_init(fl.fl6_flowlabel);
897 if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
898 struct ip6_flowlabel *flowlabel;
899 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
900 if (flowlabel == NULL)
901 return -EINVAL;
902 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
903 fl6_sock_release(flowlabel);
904 }
905 }
906 /*
907 * connect() to INADDR_ANY means loopback (BSD'ism).
908 */
909 if (ipv6_addr_any(&usin->sin6_addr))
910 usin->sin6_addr.s6_addr[15] = 1;
911
912 addr_type = ipv6_addr_type(&usin->sin6_addr);
913
914 if (addr_type & IPV6_ADDR_MULTICAST)
915 return -ENETUNREACH;
916
917 if (addr_type & IPV6_ADDR_LINKLOCAL) {
918 if (addr_len >= sizeof(struct sockaddr_in6) &&
919 usin->sin6_scope_id) {
920 /* If interface is set while binding, indices
921 * must coincide.
922 */
923 if (sk->sk_bound_dev_if &&
924 sk->sk_bound_dev_if != usin->sin6_scope_id)
925 return -EINVAL;
926
927 sk->sk_bound_dev_if = usin->sin6_scope_id;
928 }
929
930 /* Connect to link-local address requires an interface */
931 if (!sk->sk_bound_dev_if)
932 return -EINVAL;
933 }
934
935 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
936 np->flow_label = fl.fl6_flowlabel;
937
938 /*
939 * DCCP over IPv4
940 */
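/*
 * A v4-mapped destination is handed off to dccp_v4_connect() with the af_ops
 * and backlog handler temporarily switched to the mapped variants; on failure
 * the IPv6 operations and the saved ext_hdr_len are restored, presumably so
 * the socket can still attempt a native IPv6 connect afterwards.
 */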
941 if (addr_type == IPV6_ADDR_MAPPED) {
942 u32 exthdrlen = icsk->icsk_ext_hdr_len;
943 struct sockaddr_in sin;
944
945 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
946
947 if (__ipv6_only_sock(sk))
948 return -ENETUNREACH;
949
950 sin.sin_family = AF_INET;
951 sin.sin_port = usin->sin6_port;
952 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
953
954 icsk->icsk_af_ops = &dccp_ipv6_mapped;
955 sk->sk_backlog_rcv = dccp_v4_do_rcv;
956
957 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
958 if (err) {
959 icsk->icsk_ext_hdr_len = exthdrlen;
960 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
961 sk->sk_backlog_rcv = dccp_v6_do_rcv;
962 goto failure;
963 } else {
964 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
965 inet->saddr);
966 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
967 inet->rcv_saddr);
968 }
969
970 return err;
971 }
972
973 if (!ipv6_addr_any(&np->rcv_saddr))
974 saddr = &np->rcv_saddr;
975
976 fl.proto = IPPROTO_DCCP;
977 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
978 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
979 fl.oif = sk->sk_bound_dev_if;
980 fl.fl_ip_dport = usin->sin6_port;
981 fl.fl_ip_sport = inet->sport;
982 security_sk_classify_flow(sk, &fl);
983
984 if (np->opt != NULL && np->opt->srcrt != NULL) {
985 const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
986
987 ipv6_addr_copy(&final, &fl.fl6_dst);
988 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
989 final_p = &final;
990 }
991
992 err = ip6_dst_lookup(sk, &dst, &fl);
993 if (err)
994 goto failure;
995
996 if (final_p)
997 ipv6_addr_copy(&fl.fl6_dst, final_p);
998
999 err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT);
1000 if (err < 0) {
1001 if (err == -EREMOTE)
1002 err = ip6_dst_blackhole(sk, &dst, &fl);
1003 if (err < 0)
1004 goto failure;
1005 }
1006
1007 if (saddr == NULL) {
1008 saddr = &fl.fl6_src;
1009 ipv6_addr_copy(&np->rcv_saddr, saddr);
1010 }
1011
1012 /* set the source address */
1013 ipv6_addr_copy(&np->saddr, saddr);
1014 inet->rcv_saddr = LOOPBACK4_IPV6;
1015
1016 __ip6_dst_store(sk, dst, NULL, NULL);
1017
1018 icsk->icsk_ext_hdr_len = 0;
1019 if (np->opt != NULL)
1020 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
1021 np->opt->opt_nflen);
1022
1023 inet->dport = usin->sin6_port;
1024
1025 dccp_set_state(sk, DCCP_REQUESTING);
1026 err = inet6_hash_connect(&dccp_death_row, sk);
1027 if (err)
1028 goto late_failure;
1029
1030 dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
1031 np->daddr.s6_addr32,
1032 inet->sport, inet->dport);
1033 err = dccp_connect(sk);
1034 if (err)
1035 goto late_failure;
1036
1037 return 0;
1038
1039 late_failure:
1040 dccp_set_state(sk, DCCP_CLOSED);
1041 __sk_dst_reset(sk);
1042 failure:
1043 inet->dport = 0;
1044 sk->sk_route_caps = 0;
1045 return err;
1046 }
1047
1048 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1049 .queue_xmit = inet6_csk_xmit,
1050 .send_check = dccp_v6_send_check,
1051 .rebuild_header = inet6_sk_rebuild_header,
1052 .conn_request = dccp_v6_conn_request,
1053 .syn_recv_sock = dccp_v6_request_recv_sock,
1054 .net_header_len = sizeof(struct ipv6hdr),
1055 .setsockopt = ipv6_setsockopt,
1056 .getsockopt = ipv6_getsockopt,
1057 .addr2sockaddr = inet6_csk_addr2sockaddr,
1058 .sockaddr_len = sizeof(struct sockaddr_in6),
1059 .bind_conflict = inet6_csk_bind_conflict,
1060 #ifdef CONFIG_COMPAT
1061 .compat_setsockopt = compat_ipv6_setsockopt,
1062 .compat_getsockopt = compat_ipv6_getsockopt,
1063 #endif
1064 };
1065
1066 /*
1067 * DCCP over IPv4 via INET6 API
1068 */
1069 static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1070 .queue_xmit = ip_queue_xmit,
1071 .send_check = dccp_v4_send_check,
1072 .rebuild_header = inet_sk_rebuild_header,
1073 .conn_request = dccp_v6_conn_request,
1074 .syn_recv_sock = dccp_v6_request_recv_sock,
1075 .net_header_len = sizeof(struct iphdr),
1076 .setsockopt = ipv6_setsockopt,
1077 .getsockopt = ipv6_getsockopt,
1078 .addr2sockaddr = inet6_csk_addr2sockaddr,
1079 .sockaddr_len = sizeof(struct sockaddr_in6),
1080 #ifdef CONFIG_COMPAT
1081 .compat_setsockopt = compat_ipv6_setsockopt,
1082 .compat_getsockopt = compat_ipv6_getsockopt,
1083 #endif
1084 };
1085
1086 /* NOTE: A lot of things are set to zero explicitly by the call to
1087 * sk_alloc(), so they need not be done here.
1088 */
1089 static int dccp_v6_init_sock(struct sock *sk)
1090 {
1091 static __u8 dccp_v6_ctl_sock_initialized;
1092 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1093
1094 if (err == 0) {
1095 if (unlikely(!dccp_v6_ctl_sock_initialized))
1096 dccp_v6_ctl_sock_initialized = 1;
1097 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1098 }
1099
1100 return err;
1101 }
1102
1103 static void dccp_v6_destroy_sock(struct sock *sk)
1104 {
1105 dccp_destroy_sock(sk);
1106 inet6_destroy_sock(sk);
1107 }
1108
1109 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
1110 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
1111 };
1112
1113 static struct proto dccp_v6_prot = {
1114 .name = "DCCPv6",
1115 .owner = THIS_MODULE,
1116 .close = dccp_close,
1117 .connect = dccp_v6_connect,
1118 .disconnect = dccp_disconnect,
1119 .ioctl = dccp_ioctl,
1120 .init = dccp_v6_init_sock,
1121 .setsockopt = dccp_setsockopt,
1122 .getsockopt = dccp_getsockopt,
1123 .sendmsg = dccp_sendmsg,
1124 .recvmsg = dccp_recvmsg,
1125 .backlog_rcv = dccp_v6_do_rcv,
1126 .hash = dccp_v6_hash,
1127 .unhash = inet_unhash,
1128 .accept = inet_csk_accept,
1129 .get_port = inet_csk_get_port,
1130 .shutdown = dccp_shutdown,
1131 .destroy = dccp_v6_destroy_sock,
1132 .orphan_count = &dccp_orphan_count,
1133 .max_header = MAX_DCCP_HEADER,
1134 .obj_size = sizeof(struct dccp6_sock),
1135 .rsk_prot = &dccp6_request_sock_ops,
1136 .twsk_prot = &dccp6_timewait_sock_ops,
1137 .h.hashinfo = &dccp_hashinfo,
1138 #ifdef CONFIG_COMPAT
1139 .compat_setsockopt = compat_dccp_setsockopt,
1140 .compat_getsockopt = compat_dccp_getsockopt,
1141 #endif
1142 };
1143
1144 static struct inet6_protocol dccp_v6_protocol = {
1145 .handler = dccp_v6_rcv,
1146 .err_handler = dccp_v6_err,
1147 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1148 };
1149
1150 static struct proto_ops inet6_dccp_ops = {
1151 .family = PF_INET6,
1152 .owner = THIS_MODULE,
1153 .release = inet6_release,
1154 .bind = inet6_bind,
1155 .connect = inet_stream_connect,
1156 .socketpair = sock_no_socketpair,
1157 .accept = inet_accept,
1158 .getname = inet6_getname,
1159 .poll = dccp_poll,
1160 .ioctl = inet6_ioctl,
1161 .listen = inet_dccp_listen,
1162 .shutdown = inet_shutdown,
1163 .setsockopt = sock_common_setsockopt,
1164 .getsockopt = sock_common_getsockopt,
1165 .sendmsg = inet_sendmsg,
1166 .recvmsg = sock_common_recvmsg,
1167 .mmap = sock_no_mmap,
1168 .sendpage = sock_no_sendpage,
1169 #ifdef CONFIG_COMPAT
1170 .compat_setsockopt = compat_sock_common_setsockopt,
1171 .compat_getsockopt = compat_sock_common_getsockopt,
1172 #endif
1173 };
1174
1175 static struct inet_protosw dccp_v6_protosw = {
1176 .type = SOCK_DCCP,
1177 .protocol = IPPROTO_DCCP,
1178 .prot = &dccp_v6_prot,
1179 .ops = &inet6_dccp_ops,
1180 .capability = -1,
1181 .flags = INET_PROTOSW_ICSK,
1182 };
1183
1184 static int dccp_v6_init_net(struct net *net)
1185 {
1186 int err;
1187
1188 err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1189 SOCK_DCCP, IPPROTO_DCCP, net);
1190 return err;
1191 }
1192
1193 static void dccp_v6_exit_net(struct net *net)
1194 {
1195 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1196 }
1197
1198 static struct pernet_operations dccp_v6_ops = {
1199 .init = dccp_v6_init_net,
1200 .exit = dccp_v6_exit_net,
1201 };
1202
1203 static int __init dccp_v6_init(void)
1204 {
1205 int err = proto_register(&dccp_v6_prot, 1);
1206
1207 if (err != 0)
1208 goto out;
1209
1210 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1211 if (err != 0)
1212 goto out_unregister_proto;
1213
1214 inet6_register_protosw(&dccp_v6_protosw);
1215
1216 err = register_pernet_subsys(&dccp_v6_ops);
1217 if (err != 0)
1218 goto out_destroy_ctl_sock;
1219 out:
1220 return err;
1221
1222 out_destroy_ctl_sock:
1223 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1224 inet6_unregister_protosw(&dccp_v6_protosw);
1225 out_unregister_proto:
1226 proto_unregister(&dccp_v6_prot);
1227 goto out;
1228 }
1229
1230 static void __exit dccp_v6_exit(void)
1231 {
1232 unregister_pernet_subsys(&dccp_v6_ops);
1233 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1234 inet6_unregister_protosw(&dccp_v6_protosw);
1235 proto_unregister(&dccp_v6_prot);
1236 }
1237
1238 module_init(dccp_v6_init);
1239 module_exit(dccp_v6_exit);
1240
1241 /*
1242 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1243 * values directly. Also cover the case where the protocol is not specified,
1244 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1245 */
1246 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1247 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
1248 MODULE_LICENSE("GPL");
1249 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1250 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");