net/dccp/ipv6.c
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/config.h>
16 #include <linux/module.h>
17 #include <linux/random.h>
18 #include <linux/xfrm.h>
19
20 #include <net/addrconf.h>
21 #include <net/inet_common.h>
22 #include <net/inet_hashtables.h>
23 #include <net/inet_sock.h>
24 #include <net/inet6_connection_sock.h>
25 #include <net/inet6_hashtables.h>
26 #include <net/ip6_route.h>
27 #include <net/ipv6.h>
28 #include <net/protocol.h>
29 #include <net/transp_v6.h>
30 #include <net/ip6_checksum.h>
31 #include <net/xfrm.h>
32
33 #include "dccp.h"
34 #include "ipv6.h"
35
36 static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
37 static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
38 struct request_sock *req);
39 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);
40
41 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
42
43 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
44 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
45
46 static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
47 {
48 return inet_csk_get_port(&dccp_hashinfo, sk, snum,
49 inet6_csk_bind_conflict);
50 }
51
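/* Hash the socket into the DCCP lookup tables; v6-mapped sockets take the IPv4 hash path. */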
52 static void dccp_v6_hash(struct sock *sk)
53 {
54 if (sk->sk_state != DCCP_CLOSED) {
55 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
56 dccp_prot.hash(sk);
57 return;
58 }
59 local_bh_disable();
60 __inet6_hash(&dccp_hashinfo, sk);
61 local_bh_enable();
62 }
63 }
64
65 static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
66 struct in6_addr *saddr,
67 struct in6_addr *daddr,
68 unsigned long base)
69 {
70 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
71 }
72
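/* Derive the initial sequence number from the address/port 4-tuple, for either a v6 or a v4 header. */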
73 static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
74 {
75 const struct dccp_hdr *dh = dccp_hdr(skb);
76
77 if (skb->protocol == htons(ETH_P_IPV6))
78 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
79 skb->nh.ipv6h->saddr.s6_addr32,
80 dh->dccph_dport,
81 dh->dccph_sport);
82 else
83 return secure_dccp_sequence_number(skb->nh.iph->daddr,
84 skb->nh.iph->saddr,
85 dh->dccph_dport,
86 dh->dccph_sport);
87 }
88
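/* Active open: validate the destination, resolve the route for the flow and start the DCCP handshake. */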
89 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
90 int addr_len)
91 {
92 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
93 struct inet_connection_sock *icsk = inet_csk(sk);
94 struct inet_sock *inet = inet_sk(sk);
95 struct ipv6_pinfo *np = inet6_sk(sk);
96 struct dccp_sock *dp = dccp_sk(sk);
97 struct in6_addr *saddr = NULL, *final_p = NULL, final;
98 struct flowi fl;
99 struct dst_entry *dst;
100 int addr_type;
101 int err;
102
103 dp->dccps_role = DCCP_ROLE_CLIENT;
104
105 if (addr_len < SIN6_LEN_RFC2133)
106 return -EINVAL;
107
108 if (usin->sin6_family != AF_INET6)
109 return -EAFNOSUPPORT;
110
111 memset(&fl, 0, sizeof(fl));
112
113 if (np->sndflow) {
114 fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
115 IP6_ECN_flow_init(fl.fl6_flowlabel);
116 if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
117 struct ip6_flowlabel *flowlabel;
118 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
119 if (flowlabel == NULL)
120 return -EINVAL;
121 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
122 fl6_sock_release(flowlabel);
123 }
124 }
125
126 /*
127 * connect() to INADDR_ANY means loopback (BSD'ism).
128 */
129
130 if (ipv6_addr_any(&usin->sin6_addr))
131 usin->sin6_addr.s6_addr[15] = 0x1;
132
133 addr_type = ipv6_addr_type(&usin->sin6_addr);
134
135 if(addr_type & IPV6_ADDR_MULTICAST)
136 return -ENETUNREACH;
137
138 if (addr_type & IPV6_ADDR_LINKLOCAL) {
139 if (addr_len >= sizeof(struct sockaddr_in6) &&
140 usin->sin6_scope_id) {
141 /* If interface is set while binding, indices
142 * must coincide.
143 */
144 if (sk->sk_bound_dev_if &&
145 sk->sk_bound_dev_if != usin->sin6_scope_id)
146 return -EINVAL;
147
148 sk->sk_bound_dev_if = usin->sin6_scope_id;
149 }
150
151 /* Connect to link-local address requires an interface */
152 if (!sk->sk_bound_dev_if)
153 return -EINVAL;
154 }
155
156 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
157 np->flow_label = fl.fl6_flowlabel;
158
159 /*
160 * DCCP over IPv4
161 */
162
163 if (addr_type == IPV6_ADDR_MAPPED) {
164 u32 exthdrlen = icsk->icsk_ext_hdr_len;
165 struct sockaddr_in sin;
166
167 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
168
169 if (__ipv6_only_sock(sk))
170 return -ENETUNREACH;
171
172 sin.sin_family = AF_INET;
173 sin.sin_port = usin->sin6_port;
174 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
175
176 icsk->icsk_af_ops = &dccp_ipv6_mapped;
177 sk->sk_backlog_rcv = dccp_v4_do_rcv;
178
179 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
180
181 if (err) {
182 icsk->icsk_ext_hdr_len = exthdrlen;
183 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
184 sk->sk_backlog_rcv = dccp_v6_do_rcv;
185 goto failure;
186 } else {
187 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
188 inet->saddr);
189 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
190 inet->rcv_saddr);
191 }
192
193 return err;
194 }
195
196 if (!ipv6_addr_any(&np->rcv_saddr))
197 saddr = &np->rcv_saddr;
198
199 fl.proto = IPPROTO_DCCP;
200 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
201 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
202 fl.oif = sk->sk_bound_dev_if;
203 fl.fl_ip_dport = usin->sin6_port;
204 fl.fl_ip_sport = inet->sport;
205
206 if (np->opt && np->opt->srcrt) {
207 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
208 ipv6_addr_copy(&final, &fl.fl6_dst);
209 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
210 final_p = &final;
211 }
212
213 err = ip6_dst_lookup(sk, &dst, &fl);
214 if (err)
215 goto failure;
216 if (final_p)
217 ipv6_addr_copy(&fl.fl6_dst, final_p);
218
219 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
220 goto failure;
221
222 if (saddr == NULL) {
223 saddr = &fl.fl6_src;
224 ipv6_addr_copy(&np->rcv_saddr, saddr);
225 }
226
227 /* set the source address */
228 ipv6_addr_copy(&np->saddr, saddr);
229 inet->rcv_saddr = LOOPBACK4_IPV6;
230
231 ip6_dst_store(sk, dst, NULL);
232
233 icsk->icsk_ext_hdr_len = 0;
234 if (np->opt)
235 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
236 np->opt->opt_nflen);
237
238 inet->dport = usin->sin6_port;
239
240 dccp_set_state(sk, DCCP_REQUESTING);
241 err = inet6_hash_connect(&dccp_death_row, sk);
242 if (err)
243 goto late_failure;
244 /* FIXME */
245 #if 0
246 dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
247 np->daddr.s6_addr32,
248 inet->sport,
249 inet->dport);
250 #endif
251 err = dccp_connect(sk);
252 if (err)
253 goto late_failure;
254
255 return 0;
256
257 late_failure:
258 dccp_set_state(sk, DCCP_CLOSED);
259 __sk_dst_reset(sk);
260 failure:
261 inet->dport = 0;
262 sk->sk_route_caps = 0;
263 return err;
264 }
265
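/*
 * ICMPv6 error handler: on PKT_TOOBIG re-sync the MSS to the new path MTU,
 * otherwise convert the error and report it to the socket (or drop the
 * matching request_sock on a listener).
 */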
266 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
267 int type, int code, int offset, __u32 info)
268 {
269 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
270 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
271 struct ipv6_pinfo *np;
272 struct sock *sk;
273 int err;
274 __u64 seq;
275
276 sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
277 &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);
278
279 if (sk == NULL) {
280 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
281 return;
282 }
283
284 if (sk->sk_state == DCCP_TIME_WAIT) {
285 inet_twsk_put((struct inet_timewait_sock *)sk);
286 return;
287 }
288
289 bh_lock_sock(sk);
290 if (sock_owned_by_user(sk))
291 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
292
293 if (sk->sk_state == DCCP_CLOSED)
294 goto out;
295
296 np = inet6_sk(sk);
297
298 if (type == ICMPV6_PKT_TOOBIG) {
299 struct dst_entry *dst = NULL;
300
301 if (sock_owned_by_user(sk))
302 goto out;
303 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
304 goto out;
305
306 /* icmp should have updated the destination cache entry */
307 dst = __sk_dst_check(sk, np->dst_cookie);
308
309 if (dst == NULL) {
310 struct inet_sock *inet = inet_sk(sk);
311 struct flowi fl;
312
313 /* BUGGG_FUTURE: Again, it is not clear how
314 to handle rthdr case. Ignore this complexity
315 for now.
316 */
317 memset(&fl, 0, sizeof(fl));
318 fl.proto = IPPROTO_DCCP;
319 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
320 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
321 fl.oif = sk->sk_bound_dev_if;
322 fl.fl_ip_dport = inet->dport;
323 fl.fl_ip_sport = inet->sport;
324
325 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
326 sk->sk_err_soft = -err;
327 goto out;
328 }
329
330 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
331 sk->sk_err_soft = -err;
332 goto out;
333 }
334
335 } else
336 dst_hold(dst);
337
338 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
339 dccp_sync_mss(sk, dst_mtu(dst));
340 } /* else let the usual retransmit timer handle it */
341 dst_release(dst);
342 goto out;
343 }
344
345 icmpv6_err_convert(type, code, &err);
346
347 seq = DCCP_SKB_CB(skb)->dccpd_seq;
348 /* Might be for a request_sock */
349 switch (sk->sk_state) {
350 struct request_sock *req, **prev;
351 case DCCP_LISTEN:
352 if (sock_owned_by_user(sk))
353 goto out;
354
355 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
356 &hdr->daddr, &hdr->saddr,
357 inet6_iif(skb));
358 if (!req)
359 goto out;
360
361 /* ICMPs are not backlogged, hence we cannot get
362 * an established socket here.
363 */
364 BUG_TRAP(req->sk == NULL);
365
366 if (seq != dccp_rsk(req)->dreq_iss) {
367 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
368 goto out;
369 }
370
371 inet_csk_reqsk_queue_drop(sk, req, prev);
372 goto out;
373
374 case DCCP_REQUESTING:
375 case DCCP_RESPOND: /* Cannot happen.
376 It can, if SYNs are crossed. --ANK */
377 if (!sock_owned_by_user(sk)) {
378 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
379 sk->sk_err = err;
380 /*
381 * Wake people up to see the error
382 * (see connect in sock.c)
383 */
384 sk->sk_error_report(sk);
385
386 dccp_done(sk);
387 } else
388 sk->sk_err_soft = err;
389 goto out;
390 }
391
392 if (!sock_owned_by_user(sk) && np->recverr) {
393 sk->sk_err = err;
394 sk->sk_error_report(sk);
395 } else
396 sk->sk_err_soft = err;
397
398 out:
399 bh_unlock_sock(sk);
400 sock_put(sk);
401 }
402
403
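/* Build and transmit the Response packet that answers a pending connection request. */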
404 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
405 struct dst_entry *dst)
406 {
407 struct inet6_request_sock *ireq6 = inet6_rsk(req);
408 struct ipv6_pinfo *np = inet6_sk(sk);
409 struct sk_buff *skb;
410 struct ipv6_txoptions *opt = NULL;
411 struct in6_addr *final_p = NULL, final;
412 struct flowi fl;
413 int err = -1;
414
415 memset(&fl, 0, sizeof(fl));
416 fl.proto = IPPROTO_DCCP;
417 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
418 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
419 fl.fl6_flowlabel = 0;
420 fl.oif = ireq6->iif;
421 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
422 fl.fl_ip_sport = inet_sk(sk)->sport;
423
424 if (dst == NULL) {
425 opt = np->opt;
426 if (opt == NULL &&
427 np->rxopt.bits.osrcrt == 2 &&
428 ireq6->pktopts) {
429 struct sk_buff *pktopts = ireq6->pktopts;
430 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
431 if (rxopt->srcrt)
432 opt = ipv6_invert_rthdr(sk,
433 (struct ipv6_rt_hdr *)(pktopts->nh.raw +
434 rxopt->srcrt));
435 }
436
437 if (opt && opt->srcrt) {
438 struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
439 ipv6_addr_copy(&final, &fl.fl6_dst);
440 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
441 final_p = &final;
442 }
443
444 err = ip6_dst_lookup(sk, &dst, &fl);
445 if (err)
446 goto done;
447 if (final_p)
448 ipv6_addr_copy(&fl.fl6_dst, final_p);
449 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
450 goto done;
451 }
452
453 skb = dccp_make_response(sk, dst, req);
454 if (skb != NULL) {
455 struct dccp_hdr *dh = dccp_hdr(skb);
456 dh->dccph_checksum = dccp_v6_check(dh, skb->len,
457 &ireq6->loc_addr,
458 &ireq6->rmt_addr,
459 csum_partial((char *)dh,
460 skb->len,
461 skb->csum));
462 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
463 err = ip6_xmit(sk, skb, &fl, opt, 0);
464 if (err == NET_XMIT_CN)
465 err = 0;
466 }
467
468 done:
469 if (opt && opt != np->opt)
470 sock_kfree_s(sk, opt, opt->tot_len);
471 dst_release(dst);
472 return err;
473 }
474
475 static void dccp_v6_reqsk_destructor(struct request_sock *req)
476 {
477 if (inet6_rsk(req)->pktopts != NULL)
478 kfree_skb(inet6_rsk(req)->pktopts);
479 }
480
481 static struct request_sock_ops dccp6_request_sock_ops = {
482 .family = AF_INET6,
483 .obj_size = sizeof(struct dccp6_request_sock),
484 .rtx_syn_ack = dccp_v6_send_response,
485 .send_ack = dccp_v6_reqsk_send_ack,
486 .destructor = dccp_v6_reqsk_destructor,
487 .send_reset = dccp_v6_ctl_send_reset,
488 };
489
490 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
491 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
492 };
493
494 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
495 {
496 struct ipv6_pinfo *np = inet6_sk(sk);
497 struct dccp_hdr *dh = dccp_hdr(skb);
498
499 dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
500 len, IPPROTO_DCCP,
501 csum_partial((char *)dh,
502 dh->dccph_doff << 2,
503 skb->csum));
504 }
505
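/* Send a Reset in reply to rxskb without an attached socket (the sk == NULL transmit path). */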
506 static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
507 {
508 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
509 const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
510 sizeof(struct dccp_hdr_ext) +
511 sizeof(struct dccp_hdr_reset);
512 struct sk_buff *skb;
513 struct flowi fl;
514 u64 seqno;
515
516 if (rxdh->dccph_type == DCCP_PKT_RESET)
517 return;
518
519 if (!ipv6_unicast_destination(rxskb))
520 return;
521
522 /*
523 * We need to grab some memory, put together a Reset,
524 * and then put it into the queue to be sent.
525 */
526
527 skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
528 dccp_hdr_reset_len, GFP_ATOMIC);
529 if (skb == NULL)
530 return;
531
532 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
533 dccp_hdr_reset_len);
534
535 skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
536 dh = dccp_hdr(skb);
537 memset(dh, 0, dccp_hdr_reset_len);
538
539 /* Swap the source and destination ports. */
540 dh->dccph_type = DCCP_PKT_RESET;
541 dh->dccph_sport = rxdh->dccph_dport;
542 dh->dccph_dport = rxdh->dccph_sport;
543 dh->dccph_doff = dccp_hdr_reset_len / 4;
544 dh->dccph_x = 1;
545 dccp_hdr_reset(skb)->dccph_reset_code =
546 DCCP_SKB_CB(rxskb)->dccpd_reset_code;
547
548 /* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
549 seqno = 0;
550 if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
551 dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
552
553 dccp_hdr_set_seq(dh, seqno);
554 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
555 DCCP_SKB_CB(rxskb)->dccpd_seq);
556
557 memset(&fl, 0, sizeof(fl));
558 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
559 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
560 dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
561 sizeof(*dh), IPPROTO_DCCP,
562 skb->csum);
563 fl.proto = IPPROTO_DCCP;
564 fl.oif = inet6_iif(rxskb);
565 fl.fl_ip_dport = dh->dccph_dport;
566 fl.fl_ip_sport = dh->dccph_sport;
567
568 /* sk = NULL, but it is safe for now. RST socket required. */
569 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
570 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
571 ip6_xmit(NULL, skb, &fl, NULL, 0);
572 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
573 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
574 return;
575 }
576 }
577
578 kfree_skb(skb);
579 }
580
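/* Send a stateless Ack in reply to rxskb, used for acknowledging on behalf of a request_sock. */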
581 static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
582 {
583 struct flowi fl;
584 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
585 const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
586 sizeof(struct dccp_hdr_ext) +
587 sizeof(struct dccp_hdr_ack_bits);
588 struct sk_buff *skb;
589
590 skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
591 dccp_hdr_ack_len, GFP_ATOMIC);
592 if (skb == NULL)
593 return;
594
595 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
596 dccp_hdr_ack_len);
597
598 skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
599 dh = dccp_hdr(skb);
600 memset(dh, 0, dccp_hdr_ack_len);
601
602 /* Build DCCP header and checksum it. */
603 dh->dccph_type = DCCP_PKT_ACK;
604 dh->dccph_sport = rxdh->dccph_dport;
605 dh->dccph_dport = rxdh->dccph_sport;
606 dh->dccph_doff = dccp_hdr_ack_len / 4;
607 dh->dccph_x = 1;
608
609 dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
610 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
611 DCCP_SKB_CB(rxskb)->dccpd_seq);
612
613 memset(&fl, 0, sizeof(fl));
614 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
615 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
616
617 /* FIXME: calculate checksum, IPv4 also should... */
618
619 fl.proto = IPPROTO_DCCP;
620 fl.oif = inet6_iif(rxskb);
621 fl.fl_ip_dport = dh->dccph_dport;
622 fl.fl_ip_sport = dh->dccph_sport;
623
624 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
625 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
626 ip6_xmit(NULL, skb, &fl, NULL, 0);
627 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
628 return;
629 }
630 }
631
632 kfree_skb(skb);
633 }
634
635 static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
636 struct request_sock *req)
637 {
638 dccp_v6_ctl_send_ack(skb);
639 }
640
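/*
 * Packet arrived on a listening socket: return the result of dccp_check_req()
 * for a matching half-open request, an already established child socket, or
 * the listener itself if neither exists.
 */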
641 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
642 {
643 const struct dccp_hdr *dh = dccp_hdr(skb);
644 const struct ipv6hdr *iph = skb->nh.ipv6h;
645 struct sock *nsk;
646 struct request_sock **prev;
647 /* Find possible connection requests. */
648 struct request_sock *req = inet6_csk_search_req(sk, &prev,
649 dh->dccph_sport,
650 &iph->saddr,
651 &iph->daddr,
652 inet6_iif(skb));
653 if (req != NULL)
654 return dccp_check_req(sk, skb, req, prev);
655
656 nsk = __inet6_lookup_established(&dccp_hashinfo,
657 &iph->saddr, dh->dccph_sport,
658 &iph->daddr, ntohs(dh->dccph_dport),
659 inet6_iif(skb));
660
661 if (nsk != NULL) {
662 if (nsk->sk_state != DCCP_TIME_WAIT) {
663 bh_lock_sock(nsk);
664 return nsk;
665 }
666 inet_twsk_put((struct inet_timewait_sock *)nsk);
667 return NULL;
668 }
669
670 return sk;
671 }
672
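/* Passive open: validate the incoming Request, allocate a request_sock and answer with a Response. */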
673 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
674 {
675 struct inet_request_sock *ireq;
676 struct dccp_sock dp;
677 struct request_sock *req;
678 struct dccp_request_sock *dreq;
679 struct inet6_request_sock *ireq6;
680 struct ipv6_pinfo *np = inet6_sk(sk);
681 const __u32 service = dccp_hdr_request(skb)->dccph_req_service;
682 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
683 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
684
685 if (skb->protocol == htons(ETH_P_IP))
686 return dccp_v4_conn_request(sk, skb);
687
688 if (!ipv6_unicast_destination(skb))
689 goto drop;
690
691 if (dccp_bad_service_code(sk, service)) {
692 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
693 goto drop;
694 }
695 /*
696 * There are no SYN attacks on IPv6, yet...
697 */
698 if (inet_csk_reqsk_queue_is_full(sk))
699 goto drop;
700
701 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
702 goto drop;
703
704 req = inet6_reqsk_alloc(sk->sk_prot->rsk_prot);
705 if (req == NULL)
706 goto drop;
707
708 /* FIXME: process options */
709
710 dccp_openreq_init(req, &dp, skb);
711
712 ireq6 = inet6_rsk(req);
713 ireq = inet_rsk(req);
714 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
715 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
716 req->rcv_wnd = 100; /* Fake, option parsing will get the
717 right value */
718 ireq6->pktopts = NULL;
719
720 if (ipv6_opt_accepted(sk, skb) ||
721 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
722 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
723 atomic_inc(&skb->users);
724 ireq6->pktopts = skb;
725 }
726 ireq6->iif = sk->sk_bound_dev_if;
727
728 /* So that link locals have meaning */
729 if (!sk->sk_bound_dev_if &&
730 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
731 ireq6->iif = inet6_iif(skb);
732
733 /*
734 * Step 3: Process LISTEN state
735 *
736 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
737 *
738 * In fact we defer setting S.GSR, S.SWL, S.SWH to
739 * dccp_create_openreq_child.
740 */
741 dreq = dccp_rsk(req);
742 dreq->dreq_isr = dcb->dccpd_seq;
743 dreq->dreq_iss = dccp_v6_init_sequence(sk, skb);
744 dreq->dreq_service = service;
745
746 if (dccp_v6_send_response(sk, req, NULL))
747 goto drop_and_free;
748
749 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
750 return 0;
751
752 drop_and_free:
753 reqsk_free(req);
754 drop:
755 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
756 dcb->dccpd_reset_code = reset_code;
757 return -1;
758 }
759
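/*
 * Create the child socket once the handshake completes; handles both native
 * IPv6 and v6-mapped IPv4 requests.
 */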
760 static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
761 struct sk_buff *skb,
762 struct request_sock *req,
763 struct dst_entry *dst)
764 {
765 struct inet6_request_sock *ireq6 = inet6_rsk(req);
766 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
767 struct inet_sock *newinet;
768 struct dccp_sock *newdp;
769 struct dccp6_sock *newdp6;
770 struct sock *newsk;
771 struct ipv6_txoptions *opt;
772
773 if (skb->protocol == htons(ETH_P_IP)) {
774 /*
775 * v6 mapped
776 */
777
778 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
779 if (newsk == NULL)
780 return NULL;
781
782 newdp6 = (struct dccp6_sock *)newsk;
783 newdp = dccp_sk(newsk);
784 newinet = inet_sk(newsk);
785 newinet->pinet6 = &newdp6->inet6;
786 newnp = inet6_sk(newsk);
787
788 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
789
790 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
791 newinet->daddr);
792
793 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
794 newinet->saddr);
795
796 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
797
798 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
799 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
800 newnp->pktoptions = NULL;
801 newnp->opt = NULL;
802 newnp->mcast_oif = inet6_iif(skb);
803 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
804
805 /*
806 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
807 * here, dccp_create_openreq_child now does this for us, see the comment in
808 * that function for the gory details. -acme
809 */
810
811 /* This is a tricky place. Until this moment the IPv4 code
812 worked with the IPv6 icsk.icsk_af_ops.
813 Sync it now.
814 */
815 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
816
817 return newsk;
818 }
819
820 opt = np->opt;
821
822 if (sk_acceptq_is_full(sk))
823 goto out_overflow;
824
825 if (np->rxopt.bits.osrcrt == 2 &&
826 opt == NULL && ireq6->pktopts) {
827 struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
828 if (rxopt->srcrt)
829 opt = ipv6_invert_rthdr(sk,
830 (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
831 rxopt->srcrt));
832 }
833
834 if (dst == NULL) {
835 struct in6_addr *final_p = NULL, final;
836 struct flowi fl;
837
838 memset(&fl, 0, sizeof(fl));
839 fl.proto = IPPROTO_DCCP;
840 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
841 if (opt && opt->srcrt) {
842 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
843 ipv6_addr_copy(&final, &fl.fl6_dst);
844 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
845 final_p = &final;
846 }
847 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
848 fl.oif = sk->sk_bound_dev_if;
849 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
850 fl.fl_ip_sport = inet_sk(sk)->sport;
851
852 if (ip6_dst_lookup(sk, &dst, &fl))
853 goto out;
854
855 if (final_p)
856 ipv6_addr_copy(&fl.fl6_dst, final_p);
857
858 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
859 goto out;
860 }
861
862 newsk = dccp_create_openreq_child(sk, req, skb);
863 if (newsk == NULL)
864 goto out;
865
866 /*
867 * No need to charge this sock to the relevant IPv6 refcnt debug socks
868 * count here, dccp_create_openreq_child now does this for us, see the
869 * comment in that function for the gory details. -acme
870 */
871
872 ip6_dst_store(newsk, dst, NULL);
873 newsk->sk_route_caps = dst->dev->features &
874 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
875
876 newdp6 = (struct dccp6_sock *)newsk;
877 newinet = inet_sk(newsk);
878 newinet->pinet6 = &newdp6->inet6;
879 newdp = dccp_sk(newsk);
880 newnp = inet6_sk(newsk);
881
882 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
883
884 ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
885 ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
886 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
887 newsk->sk_bound_dev_if = ireq6->iif;
888
889 /* Now IPv6 options...
890
891 First: no IPv4 options.
892 */
893 newinet->opt = NULL;
894
895 /* Clone RX bits */
896 newnp->rxopt.all = np->rxopt.all;
897
898 /* Clone pktoptions received with SYN */
899 newnp->pktoptions = NULL;
900 if (ireq6->pktopts != NULL) {
901 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
902 kfree_skb(ireq6->pktopts);
903 ireq6->pktopts = NULL;
904 if (newnp->pktoptions)
905 skb_set_owner_r(newnp->pktoptions, newsk);
906 }
907 newnp->opt = NULL;
908 newnp->mcast_oif = inet6_iif(skb);
909 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
910
911 /* Clone native IPv6 options from listening socket (if any)
912
913 Yes, keeping a reference count would be much cleverer,
914 but we do one more thing here: reattach the optmem
915 to newsk.
916 */
917 if (opt) {
918 newnp->opt = ipv6_dup_options(newsk, opt);
919 if (opt != np->opt)
920 sock_kfree_s(sk, opt, opt->tot_len);
921 }
922
923 inet_csk(newsk)->icsk_ext_hdr_len = 0;
924 if (newnp->opt)
925 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
926 newnp->opt->opt_flen);
927
928 dccp_sync_mss(newsk, dst_mtu(dst));
929
930 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
931
932 __inet6_hash(&dccp_hashinfo, newsk);
933 inet_inherit_port(&dccp_hashinfo, sk, newsk);
934
935 return newsk;
936
937 out_overflow:
938 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
939 out:
940 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
941 if (opt && opt != np->opt)
942 sock_kfree_s(sk, opt, opt->tot_len);
943 dst_release(dst);
944 return NULL;
945 }
946
947 /* The socket must have its spinlock held when we get
948 * here.
949 *
950 * We have a potential double-lock case here, so even when
951 * doing backlog processing we use the BH locking scheme.
952 * This is because we cannot sleep with the original spinlock
953 * held.
954 */
955 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
956 {
957 struct ipv6_pinfo *np = inet6_sk(sk);
958 struct sk_buff *opt_skb = NULL;
959
960 /* Imagine: socket is IPv6. IPv4 packet arrives,
961 goes to the IPv4 receive handler and is backlogged.
962 From the backlog it always goes here. Kerboom...
963 Fortunately, dccp_rcv_established and rcv_established
964 handle them correctly, but that is not the case with
965 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
966 */
967
968 if (skb->protocol == htons(ETH_P_IP))
969 return dccp_v4_do_rcv(sk, skb);
970
971 if (sk_filter(sk, skb, 0))
972 goto discard;
973
974 /*
975 * socket locking is here for SMP purposes as backlog rcv
976 * is currently called with bh processing disabled.
977 */
978
979 /* Do Stevens' IPV6_PKTOPTIONS.
980
981 Yes, guys, this is the only place in our code where we
982 can make it without affecting IPv4.
983 The rest of the code is protocol independent,
984 and I do not like the idea of uglifying IPv4.
985
986 Actually, the whole idea behind IPV6_PKTOPTIONS
987 does not look very well thought out. For now we latch the
988 options received in the last packet, as enqueued
989 by tcp. Feel free to propose a better solution.
990 --ANK (980728)
991 */
992 if (np->rxopt.all)
993 opt_skb = skb_clone(skb, GFP_ATOMIC);
994
995 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
996 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
997 goto reset;
998 return 0;
999 }
1000
1001 if (sk->sk_state == DCCP_LISTEN) {
1002 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
1003 if (!nsk)
1004 goto discard;
1005
1006 /*
1007 * Queue it on the new socket if the new socket is active,
1008 * otherwise we just short-circuit this and continue with
1009 * the new socket.
1010 */
1011 if(nsk != sk) {
1012 if (dccp_child_process(sk, nsk, skb))
1013 goto reset;
1014 if (opt_skb)
1015 __kfree_skb(opt_skb);
1016 return 0;
1017 }
1018 }
1019
1020 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
1021 goto reset;
1022 return 0;
1023
1024 reset:
1025 dccp_v6_ctl_send_reset(skb);
1026 discard:
1027 if (opt_skb)
1028 __kfree_skb(opt_skb);
1029 kfree_skb(skb);
1030 return 0;
1031 }
1032
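/* Receive entry point registered with the IPv6 stack for IPPROTO_DCCP. */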
1033 static int dccp_v6_rcv(struct sk_buff **pskb)
1034 {
1035 const struct dccp_hdr *dh;
1036 struct sk_buff *skb = *pskb;
1037 struct sock *sk;
1038
1039 /* Step 1: Check header basics: */
1040
1041 if (dccp_invalid_packet(skb))
1042 goto discard_it;
1043
1044 dh = dccp_hdr(skb);
1045
1046 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
1047 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
1048
1049 if (dccp_packet_without_ack(skb))
1050 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
1051 else
1052 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
1053
1054 /* Step 2:
1055 * Look up flow ID in table and get corresponding socket */
1056 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
1057 dh->dccph_sport,
1058 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
1059 inet6_iif(skb));
1060 /*
1061 * Step 2:
1062 * If no socket ...
1063 * Generate Reset(No Connection) unless P.type == Reset
1064 * Drop packet and return
1065 */
1066 if (sk == NULL)
1067 goto no_dccp_socket;
1068
1069 /*
1070 * Step 2:
1071 * ... or S.state == TIMEWAIT,
1072 * Generate Reset(No Connection) unless P.type == Reset
1073 * Drop packet and return
1074 */
1075
1076 if (sk->sk_state == DCCP_TIME_WAIT)
1077 goto do_time_wait;
1078
1079 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1080 goto discard_and_relse;
1081
1082 return sk_receive_skb(sk, skb) ? -1 : 0;
1083
1084 no_dccp_socket:
1085 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1086 goto discard_it;
1087 /*
1088 * Step 2:
1089 * Generate Reset(No Connection) unless P.type == Reset
1090 * Drop packet and return
1091 */
1092 if (dh->dccph_type != DCCP_PKT_RESET) {
1093 DCCP_SKB_CB(skb)->dccpd_reset_code =
1094 DCCP_RESET_CODE_NO_CONNECTION;
1095 dccp_v6_ctl_send_reset(skb);
1096 }
1097 discard_it:
1098
1099 /*
1100 * Discard frame
1101 */
1102
1103 kfree_skb(skb);
1104 return 0;
1105
1106 discard_and_relse:
1107 sock_put(sk);
1108 goto discard_it;
1109
1110 do_time_wait:
1111 inet_twsk_put((struct inet_timewait_sock *)sk);
1112 goto no_dccp_socket;
1113 }
1114
1115 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1116 .queue_xmit = inet6_csk_xmit,
1117 .send_check = dccp_v6_send_check,
1118 .rebuild_header = inet6_sk_rebuild_header,
1119 .conn_request = dccp_v6_conn_request,
1120 .syn_recv_sock = dccp_v6_request_recv_sock,
1121 .net_header_len = sizeof(struct ipv6hdr),
1122 .setsockopt = ipv6_setsockopt,
1123 .getsockopt = ipv6_getsockopt,
1124 .addr2sockaddr = inet6_csk_addr2sockaddr,
1125 .sockaddr_len = sizeof(struct sockaddr_in6)
1126 };
1127
1128 /*
1129 * DCCP over IPv4 via INET6 API
1130 */
1131 static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1132 .queue_xmit = ip_queue_xmit,
1133 .send_check = dccp_v4_send_check,
1134 .rebuild_header = inet_sk_rebuild_header,
1135 .conn_request = dccp_v6_conn_request,
1136 .syn_recv_sock = dccp_v6_request_recv_sock,
1137 .net_header_len = sizeof(struct iphdr),
1138 .setsockopt = ipv6_setsockopt,
1139 .getsockopt = ipv6_getsockopt,
1140 .addr2sockaddr = inet6_csk_addr2sockaddr,
1141 .sockaddr_len = sizeof(struct sockaddr_in6)
1142 };
1143
1144 /* NOTE: A lot of things are set to zero explicitly by the call to
1145 * sk_alloc(), so they need not be done here.
1146 */
1147 static int dccp_v6_init_sock(struct sock *sk)
1148 {
1149 int err = dccp_v4_init_sock(sk);
1150
1151 if (err == 0)
1152 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1153
1154 return err;
1155 }
1156
1157 static int dccp_v6_destroy_sock(struct sock *sk)
1158 {
1159 dccp_v4_destroy_sock(sk);
1160 return inet6_destroy_sock(sk);
1161 }
1162
1163 static struct proto dccp_v6_prot = {
1164 .name = "DCCPv6",
1165 .owner = THIS_MODULE,
1166 .close = dccp_close,
1167 .connect = dccp_v6_connect,
1168 .disconnect = dccp_disconnect,
1169 .ioctl = dccp_ioctl,
1170 .init = dccp_v6_init_sock,
1171 .setsockopt = dccp_setsockopt,
1172 .getsockopt = dccp_getsockopt,
1173 .sendmsg = dccp_sendmsg,
1174 .recvmsg = dccp_recvmsg,
1175 .backlog_rcv = dccp_v6_do_rcv,
1176 .hash = dccp_v6_hash,
1177 .unhash = dccp_unhash,
1178 .accept = inet_csk_accept,
1179 .get_port = dccp_v6_get_port,
1180 .shutdown = dccp_shutdown,
1181 .destroy = dccp_v6_destroy_sock,
1182 .orphan_count = &dccp_orphan_count,
1183 .max_header = MAX_DCCP_HEADER,
1184 .obj_size = sizeof(struct dccp6_sock),
1185 .rsk_prot = &dccp6_request_sock_ops,
1186 .twsk_prot = &dccp6_timewait_sock_ops,
1187 };
1188
1189 static struct inet6_protocol dccp_v6_protocol = {
1190 .handler = dccp_v6_rcv,
1191 .err_handler = dccp_v6_err,
1192 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1193 };
1194
1195 static struct proto_ops inet6_dccp_ops = {
1196 .family = PF_INET6,
1197 .owner = THIS_MODULE,
1198 .release = inet6_release,
1199 .bind = inet6_bind,
1200 .connect = inet_stream_connect,
1201 .socketpair = sock_no_socketpair,
1202 .accept = inet_accept,
1203 .getname = inet6_getname,
1204 .poll = dccp_poll,
1205 .ioctl = inet6_ioctl,
1206 .listen = inet_dccp_listen,
1207 .shutdown = inet_shutdown,
1208 .setsockopt = sock_common_setsockopt,
1209 .getsockopt = sock_common_getsockopt,
1210 .sendmsg = inet_sendmsg,
1211 .recvmsg = sock_common_recvmsg,
1212 .mmap = sock_no_mmap,
1213 .sendpage = sock_no_sendpage,
1214 };
1215
1216 static struct inet_protosw dccp_v6_protosw = {
1217 .type = SOCK_DCCP,
1218 .protocol = IPPROTO_DCCP,
1219 .prot = &dccp_v6_prot,
1220 .ops = &inet6_dccp_ops,
1221 .capability = -1,
1222 .flags = INET_PROTOSW_ICSK,
1223 };
1224
1225 static int __init dccp_v6_init(void)
1226 {
1227 int err = proto_register(&dccp_v6_prot, 1);
1228
1229 if (err != 0)
1230 goto out;
1231
1232 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1233 if (err != 0)
1234 goto out_unregister_proto;
1235
1236 inet6_register_protosw(&dccp_v6_protosw);
1237 out:
1238 return err;
1239 out_unregister_proto:
1240 proto_unregister(&dccp_v6_prot);
1241 goto out;
1242 }
1243
1244 static void __exit dccp_v6_exit(void)
1245 {
1246 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1247 inet6_unregister_protosw(&dccp_v6_protosw);
1248 proto_unregister(&dccp_v6_prot);
1249 }
1250
1251 module_init(dccp_v6_init);
1252 module_exit(dccp_v6_exit);
1253
1254 /*
1255 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1256 * values directly. Also cover the case where the protocol is not specified,
1257 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1258 */
1259 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
1260 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
1261 MODULE_LICENSE("GPL");
1262 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1263 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");