net/dccp/ipv6.c
/*
 *  DCCP over IPv6
 *  Linux INET6 implementation
 *
 *  Based on net/dccp6/ipv6.c
 *
 *  Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

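/*
 * Hash an open socket into the DCCP lookup tables: v4-mapped sockets
 * (using dccp_ipv6_mapped) go through the IPv4 inet_hash(), native
 * IPv6 sockets through __inet6_hash() with bottom halves disabled.
 */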
static void dccp_v6_hash(struct sock *sk)
{
        if (sk->sk_state != DCCP_CLOSED) {
                if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
                        inet_hash(sk);
                        return;
                }
                local_bh_disable();
                __inet6_hash(sk);
                local_bh_enable();
        }
}

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
                                          struct in6_addr *saddr,
                                          struct in6_addr *daddr)
{
        return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
                                      struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_hdr *dh = dccp_hdr(skb);

        dccp_csum_outgoing(skb);
        dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}

static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                                  __be16 sport, __be16 dport)
{
        return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}

static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
{
        return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
                                             ipv6_hdr(skb)->saddr.s6_addr32,
                                             dccp_hdr(skb)->dccph_dport,
                                             dccp_hdr(skb)->dccph_sport);
}

static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        int type, int code, int offset, __be32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
        const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
        struct dccp_sock *dp;
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        __u64 seq;
        struct net *net = dev_net(skb->dev);

        if (skb->len < offset + sizeof(*dh) ||
            skb->len < offset + __dccp_basic_hdr_len(dh)) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
                return;
        }

        sk = inet6_lookup(net, &dccp_hashinfo,
                          &hdr->daddr, dh->dccph_dport,
                          &hdr->saddr, dh->dccph_sport, inet6_iif(skb));

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == DCCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == DCCP_CLOSED)
                goto out;

        dp = dccp_sk(sk);
        seq = dccp_hdr_seq(dh);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);
                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle the rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_DCCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;
                        security_sk_classify_flow(sk, &fl);

                        err = ip6_dst_lookup(sk, &dst, &fl);
                        if (err) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        err = xfrm_lookup(&dst, &fl, sk, 0);
                        if (err < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }
                } else
                        dst_hold(dst);

                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                        dccp_sync_mss(sk, dst_mtu(dst));
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        /* Might be for a request_sock */
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case DCCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
                                           &hdr->daddr, &hdr->saddr,
                                           inet6_iif(skb));
                if (req == NULL)
                        goto out;

                /*
                 * ICMPs are not backlogged, hence we cannot get an established
                 * socket here.
                 */
                WARN_ON(req->sk != NULL);

                if (seq != dccp_rsk(req)->dreq_iss) {
                        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case DCCP_REQUESTING:
        case DCCP_RESPOND:  /* Cannot happen.
                               It can if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
                        /*
                         * Wake people up to see the error
                         * (see connect in sock.c)
                         */
                        sk->sk_error_report(sk);
                        dccp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

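/*
 * Send a DCCP-Response for a pending connection request: route the flow
 * (following a type 0 routing header from the listener's IPv6 options, if
 * one is set), build the packet, finish the checksum over the IPv6
 * pseudo-header and transmit it with ip6_xmit().
 */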
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr *final_p = NULL, final;
        struct flowi fl;
        int err = -1;
        struct dst_entry *dst;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = ireq6->iif;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
        fl.fl_ip_sport = inet_sk(sk)->sport;
        security_req_classify_flow(req, &fl);

        opt = np->opt;

        if (opt != NULL && opt->srcrt != NULL) {
                const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto done;

        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        err = xfrm_lookup(&dst, &fl, sk, 0);
        if (err < 0)
                goto done;

        skb = dccp_make_response(sk, dst, req);
        if (skb != NULL) {
                struct dccp_hdr *dh = dccp_hdr(skb);

                dh->dccph_checksum = dccp_v6_csum_finish(skb,
                                                         &ireq6->loc_addr,
                                                         &ireq6->rmt_addr);
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                err = net_xmit_eval(err);
        }

done:
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
        if (inet6_rsk(req)->pktopts != NULL)
                kfree_skb(inet6_rsk(req)->pktopts);
}

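/*
 * Send a Reset from the per-net control socket in reply to a packet that has
 * no usable connection.  A Reset is never answered with another Reset, and
 * only unicast destinations are answered at all.
 */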
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
        struct ipv6hdr *rxip6h;
        struct sk_buff *skb;
        struct flowi fl;
        struct net *net = dev_net(rxskb->dst->dev);
        struct sock *ctl_sk = net->dccp.v6_ctl_sk;

        if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
                return;

        if (!ipv6_unicast_destination(rxskb))
                return;

        skb = dccp_ctl_make_reset(ctl_sk, rxskb);
        if (skb == NULL)
                return;

        rxip6h = ipv6_hdr(rxskb);
        dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
                                                            &rxip6h->daddr);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);

        fl.proto = IPPROTO_DCCP;
        fl.oif = inet6_iif(rxskb);
        fl.fl_ip_dport = dccp_hdr(skb)->dccph_dport;
        fl.fl_ip_sport = dccp_hdr(skb)->dccph_sport;
        security_skb_classify_flow(rxskb, &fl);

        /* sk = NULL, but it is safe for now. RST socket required. */
        if (!ip6_dst_lookup(ctl_sk, &skb->dst, &fl)) {
                if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
                        ip6_xmit(ctl_sk, skb, &fl, NULL, 0);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
                        return;
                }
        }

        kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
        .family         = AF_INET6,
        .obj_size       = sizeof(struct dccp6_request_sock),
        .rtx_syn_ack    = dccp_v6_send_response,
        .send_ack       = dccp_reqsk_send_ack,
        .destructor     = dccp_v6_reqsk_destructor,
        .send_reset     = dccp_v6_ctl_send_reset,
};

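/*
 * Match an incoming packet against a listening socket: try the pending
 * request socks first, then the established/TIME_WAIT table.  Returns the
 * listener itself when nothing more specific is found.
 */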
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet6_csk_search_req(sk, &prev,
                                                        dh->dccph_sport,
                                                        &iph->saddr,
                                                        &iph->daddr,
                                                        inet6_iif(skb));
        if (req != NULL)
                return dccp_check_req(sk, skb, req, prev);

        nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
                                         &iph->saddr, dh->dccph_sport,
                                         &iph->daddr, ntohs(dh->dccph_dport),
                                         inet6_iif(skb));
        if (nsk != NULL) {
                if (nsk->sk_state != DCCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

        return sk;
}

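/*
 * Handle a DCCP-Request arriving on a LISTEN socket: validate the service
 * code, allocate and fill in a request sock (peer addresses, packet options,
 * initial sequence numbers) and answer with a DCCP-Response.
 */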
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct request_sock *req;
        struct dccp_request_sock *dreq;
        struct inet6_request_sock *ireq6;
        struct ipv6_pinfo *np = inet6_sk(sk);
        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                return 0;       /* discard, don't send a reset here */

        if (dccp_bad_service_code(sk, service)) {
                dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
        }
        /*
         * There are no SYN attacks on IPv6, yet...
         */
        dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
        if (inet_csk_reqsk_queue_is_full(sk))
                goto drop;

        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
        if (req == NULL)
                goto drop;

        dccp_reqsk_init(req, skb);

        dreq = dccp_rsk(req);
        if (dccp_parse_options(sk, dreq, skb))
                goto drop_and_free;

        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;

        ireq6 = inet6_rsk(req);
        ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);

        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
                ireq6->pktopts = skb;
        }
        ireq6->iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq6->iif = inet6_iif(skb);

        /*
         * Step 3: Process LISTEN state
         *
         *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
         *
         * In fact we defer setting S.GSR, S.SWL, S.SWH to
         * dccp_create_openreq_child.
         */
        dreq->dreq_isr     = dcb->dccpd_seq;
        dreq->dreq_iss     = dccp_v6_init_sequence(skb);
        dreq->dreq_service = service;

        if (dccp_v6_send_response(sk, req))
                goto drop_and_free;

        inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
        return 0;

drop_and_free:
        reqsk_free(req);
drop:
        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
        return -1;
}

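/*
 * Create the child socket once the handshake completes.  For v4-mapped
 * traffic this delegates to dccp_v4_request_recv_sock() and then fixes up
 * the IPv6 state; for native IPv6 it routes the flow, clones the listener's
 * IPv6 options and hashes the new socket.
 */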
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req,
                                              struct dst_entry *dst)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
        struct dccp_sock *newdp;
        struct dccp6_sock *newdp6;
        struct sock *newsk;
        struct ipv6_txoptions *opt;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 * v6 mapped
                 */
                newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
                if (newsk == NULL)
                        return NULL;

                newdp6 = (struct dccp6_sock *)newsk;
                newdp = dccp_sk(newsk);
                newinet = inet_sk(newsk);
                newinet->pinet6 = &newdp6->inet6;
                newnp = inet6_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet6_iif(skb);
                newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, dccp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is a tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (dst == NULL) {
                struct in6_addr *final_p = NULL, final;
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_DCCP;
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                if (opt != NULL && opt->srcrt != NULL) {
                        const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }
                ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = inet_rsk(req)->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->sport;
                security_sk_classify_flow(sk, &fl);

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto out;
        }

        newsk = dccp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, dccp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        __ip6_dst_store(newsk, dst, NULL, NULL);
        newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
                                                      NETIF_F_TSO);
        newdp6 = (struct dccp6_sock *)newsk;
        newinet = inet_sk(newsk);
        newinet->pinet6 = &newdp6->inet6;
        newdp = dccp_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
        newsk->sk_bound_dev_if = ireq6->iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (ireq6->pktopts != NULL) {
                newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
                kfree_skb(ireq6->pktopts);
                ireq6->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

        /*
         * Clone native IPv6 options from the listening socket (if any)
         *
         * Yes, keeping a reference count would be much more clever, but we do
         * one more thing here: reattach optmem to newsk.
         */
        if (opt != NULL) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newnp->opt != NULL)
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);

        dccp_sync_mss(newsk, dst_mtu(dst));

        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

        __inet6_hash(newsk);
        __inet_inherit_port(sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to the IPv4 receive handler and is backlogged.
           From the backlog it always goes here. Kerboom...
           Fortunately, dccp_rcv_established and rcv_established
           handle them correctly, but it is not the case with
           dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb))
                goto discard;

        /*
         * socket locking is here for SMP purposes as backlog rcv is currently
         * called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code where we
           may make it not affect IPv4.
           The rest of the code is protocol independent,
           and I do not like the idea of uglifying IPv4.

           Actually, the whole idea behind IPV6_PKTOPTIONS
           looks not very well thought out. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose a better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                /*
                 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
                 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
                 */
                opt_skb = skb_clone(skb, GFP_ATOMIC);

        if (sk->sk_state == DCCP_OPEN) { /* Fast path */
                if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
                        goto reset;
                if (opt_skb) {
                        /* XXX This is where we would goto ipv6_pktoptions. */
                        __kfree_skb(opt_skb);
                }
                return 0;
        }

        /*
         *  Step 3: Process LISTEN state
         *     If S.state == LISTEN,
         *       If P.type == Request or P contains a valid Init Cookie option,
         *            (* Must scan the packet's options to check for Init
         *               Cookies.  Only Init Cookies are processed here,
         *               however; other options are processed in Step 8.  This
         *               scan need only be performed if the endpoint uses Init
         *               Cookies *)
         *            (* Generate a new socket and switch to that socket *)
         *            Set S := new socket for this port pair
         *            S.state = RESPOND
         *            Choose S.ISS (initial seqno) or set from Init Cookies
         *            Initialize S.GAR := S.ISS
         *            Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
         *            Continue with S.state == RESPOND
         *            (* A Response packet will be generated in Step 11 *)
         *       Otherwise,
         *            Generate Reset(No Connection) unless P.type == Reset
         *            Drop packet and return
         *
         * NOTE: the check for the packet types is done in
         *       dccp_rcv_state_process
         */
        if (sk->sk_state == DCCP_LISTEN) {
                struct sock *nsk = dccp_v6_hnd_req(sk, skb);

                if (nsk == NULL)
                        goto discard;
                /*
                 * Queue it on the new socket if the new socket is active,
                 * otherwise we just shortcircuit this and continue with
                 * the new socket.
                 */
                if (nsk != sk) {
                        if (dccp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb != NULL)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        }

        if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
                goto reset;
        if (opt_skb) {
                /* XXX This is where we would goto ipv6_pktoptions. */
                __kfree_skb(opt_skb);
        }
        return 0;

reset:
        dccp_v6_ctl_send_reset(sk, skb);
discard:
        if (opt_skb != NULL)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
}

static int dccp_v6_rcv(struct sk_buff *skb)
{
        const struct dccp_hdr *dh;
        struct sock *sk;
        int min_cov;

        /* Step 1: Check header basics */

        if (dccp_invalid_packet(skb))
                goto discard_it;

        /* Step 1: If header checksum is incorrect, drop packet and return. */
        if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
                                     &ipv6_hdr(skb)->daddr)) {
                DCCP_WARN("dropped packet with invalid checksum\n");
                goto discard_it;
        }

        dh = dccp_hdr(skb);

        DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
        DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

        if (dccp_packet_without_ack(skb))
                DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
        else
                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

        /* Step 2:
         *      Look up flow ID in table and get corresponding socket */
        sk = __inet6_lookup(dev_net(skb->dst->dev), &dccp_hashinfo,
                            &ipv6_hdr(skb)->saddr, dh->dccph_sport,
                            &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport),
                            inet6_iif(skb));
        /*
         * Step 2:
         *      If no socket ...
         */
        if (sk == NULL) {
                dccp_pr_debug("failed to look up flow ID in table and "
                              "get corresponding socket\n");
                goto no_dccp_socket;
        }

        /*
         * Step 2:
         *      ... or S.state == TIMEWAIT,
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (sk->sk_state == DCCP_TIME_WAIT) {
                dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
                inet_twsk_put(inet_twsk(sk));
                goto no_dccp_socket;
        }

        /*
         * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
         *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
         *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
         */
        min_cov = dccp_sk(sk)->dccps_pcrlen;
        if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
                dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
                              dh->dccph_cscov, min_cov);
                /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
                goto discard_and_relse;
        }

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
        /*
         * Step 2:
         *      If no socket ...
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (dh->dccph_type != DCCP_PKT_RESET) {
                DCCP_SKB_CB(skb)->dccpd_reset_code =
                                        DCCP_RESET_CODE_NO_CONNECTION;
                dccp_v6_ctl_send_reset(sk, skb);
        }

discard_it:
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;
}

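/*
 * Active open: resolve the flow label and any v4-mapped destination, route
 * the flow, pick a source address and local port, then move the socket to
 * REQUESTING and send the initial DCCP-Request via dccp_connect().
 */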
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                           int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        dp->dccps_role = DCCP_ROLE_CLIENT;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }
        /*
         * connect() to INADDR_ANY means loopback (BSD'ism).
         */
        if (ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         * DCCP over IPv4
         */
        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &dccp_ipv6_mapped;
                sk->sk_backlog_rcv = dccp_v4_do_rcv;

                err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &dccp_ipv6_af_ops;
                        sk->sk_backlog_rcv = dccp_v6_do_rcv;
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;
        security_sk_classify_flow(sk, &fl);

        if (np->opt != NULL && np->opt->srcrt != NULL) {
                const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;

        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT);
        if (err < 0) {
                if (err == -EREMOTE)
                        err = ip6_dst_blackhole(sk, &dst, &fl);
                if (err < 0)
                        goto failure;
        }

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        __ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (np->opt != NULL)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                                          np->opt->opt_nflen);

        inet->dport = usin->sin6_port;

        dccp_set_state(sk, DCCP_REQUESTING);
        err = inet6_hash_connect(&dccp_death_row, sk);
        if (err)
                goto late_failure;

        dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
                                                      np->daddr.s6_addr32,
                                                      inet->sport, inet->dport);
        err = dccp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        dccp_set_state(sk, DCCP_CLOSED);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = dccp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
        .bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *  DCCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = dccp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
        static __u8 dccp_v6_ctl_sock_initialized;
        int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

        if (err == 0) {
                if (unlikely(!dccp_v6_ctl_sock_initialized))
                        dccp_v6_ctl_sock_initialized = 1;
                inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
        }

        return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
        dccp_destroy_sock(sk);
        inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
        .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
        .name              = "DCCPv6",
        .owner             = THIS_MODULE,
        .close             = dccp_close,
        .connect           = dccp_v6_connect,
        .disconnect        = dccp_disconnect,
        .ioctl             = dccp_ioctl,
        .init              = dccp_v6_init_sock,
        .setsockopt        = dccp_setsockopt,
        .getsockopt        = dccp_getsockopt,
        .sendmsg           = dccp_sendmsg,
        .recvmsg           = dccp_recvmsg,
        .backlog_rcv       = dccp_v6_do_rcv,
        .hash              = dccp_v6_hash,
        .unhash            = inet_unhash,
        .accept            = inet_csk_accept,
        .get_port          = inet_csk_get_port,
        .shutdown          = dccp_shutdown,
        .destroy           = dccp_v6_destroy_sock,
        .orphan_count      = &dccp_orphan_count,
        .max_header        = MAX_DCCP_HEADER,
        .obj_size          = sizeof(struct dccp6_sock),
        .rsk_prot          = &dccp6_request_sock_ops,
        .twsk_prot         = &dccp6_timewait_sock_ops,
        .h.hashinfo        = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_dccp_setsockopt,
        .compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static struct inet6_protocol dccp_v6_protocol = {
        .handler     = dccp_v6_rcv,
        .err_handler = dccp_v6_err,
        .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct proto_ops inet6_dccp_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
        .release           = inet6_release,
        .bind              = inet6_bind,
        .connect           = inet_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
        .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
        .type       = SOCK_DCCP,
        .protocol   = IPPROTO_DCCP,
        .prot       = &dccp_v6_prot,
        .ops        = &inet6_dccp_ops,
        .capability = -1,
        .flags      = INET_PROTOSW_ICSK,
};

static int dccp_v6_init_net(struct net *net)
{
        int err;

        err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
                                   SOCK_DCCP, IPPROTO_DCCP, net);
        return err;
}

static void dccp_v6_exit_net(struct net *net)
{
        inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
        .init = dccp_v6_init_net,
        .exit = dccp_v6_exit_net,
};

static int __init dccp_v6_init(void)
{
        int err = proto_register(&dccp_v6_prot, 1);

        if (err != 0)
                goto out;

        err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        if (err != 0)
                goto out_unregister_proto;

        inet6_register_protosw(&dccp_v6_protosw);

        err = register_pernet_subsys(&dccp_v6_ops);
        if (err != 0)
                goto out_destroy_ctl_sock;
out:
        return err;

out_destroy_ctl_sock:
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
        proto_unregister(&dccp_v6_prot);
        goto out;
}

static void __exit dccp_v6_exit(void)
{
        unregister_pernet_subsys(&dccp_v6_ops);
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
        proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP
 * (33) values directly.  Also cover the case where the protocol is not
 * specified, i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");