[SK_BUFF]: Introduce ipv6_hdr(), remove skb->nh.ipv6h
net/dccp/ipv6.c
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/xfrm.h>
18
19 #include <net/addrconf.h>
20 #include <net/inet_common.h>
21 #include <net/inet_hashtables.h>
22 #include <net/inet_sock.h>
23 #include <net/inet6_connection_sock.h>
24 #include <net/inet6_hashtables.h>
25 #include <net/ip6_route.h>
26 #include <net/ipv6.h>
27 #include <net/protocol.h>
28 #include <net/transp_v6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/xfrm.h>
31
32 #include "dccp.h"
33 #include "ipv6.h"
34 #include "feat.h"
35
36 /* Socket used for sending RSTs and ACKs */
37 static struct socket *dccp_v6_ctl_socket;
38
39 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
40 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
41
42 static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
43 {
44 return inet_csk_get_port(&dccp_hashinfo, sk, snum,
45 inet6_csk_bind_conflict);
46 }
47
48 static void dccp_v6_hash(struct sock *sk)
49 {
50 if (sk->sk_state != DCCP_CLOSED) {
51 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
52 dccp_hash(sk);
53 return;
54 }
55 local_bh_disable();
56 __inet6_hash(&dccp_hashinfo, sk);
57 local_bh_enable();
58 }
59 }
60
61 /* add pseudo-header to DCCP checksum stored in skb->csum */
62 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
63 struct in6_addr *saddr,
64 struct in6_addr *daddr)
65 {
66 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
67 }
68
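/* Checksum an outgoing packet on a connected socket: sum the DCCP header and
 * payload first, then fold in the IPv6 pseudo-header built from the socket's
 * source and destination addresses. */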
69 static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
70 struct sk_buff *skb)
71 {
72 struct ipv6_pinfo *np = inet6_sk(sk);
73 struct dccp_hdr *dh = dccp_hdr(skb);
74
75 dccp_csum_outgoing(skb);
76 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
77 }
78
79 static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
80 __be16 sport, __be16 dport )
81 {
82 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
83 }
84
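/* Derive the initial sequence number for our reply from the 4-tuple of the
 * received packet as seen from the local endpoint (hence the swapped address
 * and port order), reusing the TCP ISN generator. */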
85 static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
86 {
87 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
88 ipv6_hdr(skb)->saddr.s6_addr32,
89 dccp_hdr(skb)->dccph_dport,
90 dccp_hdr(skb)->dccph_sport );
91
92 }
93
94 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
95 int type, int code, int offset, __be32 info)
96 {
97 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
98 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
99 struct ipv6_pinfo *np;
100 struct sock *sk;
101 int err;
102 __u64 seq;
103
104 sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
105 &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
106
107 if (sk == NULL) {
108 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
109 return;
110 }
111
112 if (sk->sk_state == DCCP_TIME_WAIT) {
113 inet_twsk_put(inet_twsk(sk));
114 return;
115 }
116
117 bh_lock_sock(sk);
118 if (sock_owned_by_user(sk))
119 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
120
121 if (sk->sk_state == DCCP_CLOSED)
122 goto out;
123
124 np = inet6_sk(sk);
125
126 if (type == ICMPV6_PKT_TOOBIG) {
127 struct dst_entry *dst = NULL;
128
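		/* Path MTU discovery: ignore the update if the socket is busy in
		 * user context or in a state (LISTEN/CLOSED) that does not send
		 * data; otherwise resync the maximum packet size against the new
		 * route MTU. */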
129 if (sock_owned_by_user(sk))
130 goto out;
131 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
132 goto out;
133
134 /* icmp should have updated the destination cache entry */
135 dst = __sk_dst_check(sk, np->dst_cookie);
136 if (dst == NULL) {
137 struct inet_sock *inet = inet_sk(sk);
138 struct flowi fl;
139
140 /* BUGGG_FUTURE: Again, it is not clear how
 141 to handle the rthdr case. Ignore this complexity
142 for now.
143 */
144 memset(&fl, 0, sizeof(fl));
145 fl.proto = IPPROTO_DCCP;
146 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
147 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
148 fl.oif = sk->sk_bound_dev_if;
149 fl.fl_ip_dport = inet->dport;
150 fl.fl_ip_sport = inet->sport;
151 security_sk_classify_flow(sk, &fl);
152
153 err = ip6_dst_lookup(sk, &dst, &fl);
154 if (err) {
155 sk->sk_err_soft = -err;
156 goto out;
157 }
158
159 err = xfrm_lookup(&dst, &fl, sk, 0);
160 if (err < 0) {
161 sk->sk_err_soft = -err;
162 goto out;
163 }
164 } else
165 dst_hold(dst);
166
167 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
168 dccp_sync_mss(sk, dst_mtu(dst));
169 } /* else let the usual retransmit timer handle it */
170 dst_release(dst);
171 goto out;
172 }
173
174 icmpv6_err_convert(type, code, &err);
175
176 seq = DCCP_SKB_CB(skb)->dccpd_seq;
 177 /* Might be for a request_sock */
178 switch (sk->sk_state) {
179 struct request_sock *req, **prev;
180 case DCCP_LISTEN:
181 if (sock_owned_by_user(sk))
182 goto out;
183
184 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
185 &hdr->daddr, &hdr->saddr,
186 inet6_iif(skb));
187 if (req == NULL)
188 goto out;
189
190 /*
191 * ICMPs are not backlogged, hence we cannot get an established
192 * socket here.
193 */
194 BUG_TRAP(req->sk == NULL);
195
196 if (seq != dccp_rsk(req)->dreq_iss) {
197 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
198 goto out;
199 }
200
201 inet_csk_reqsk_queue_drop(sk, req, prev);
202 goto out;
203
204 case DCCP_REQUESTING:
205 case DCCP_RESPOND: /* Cannot happen.
 206 It can, if SYNs are crossed. --ANK */
207 if (!sock_owned_by_user(sk)) {
208 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
209 sk->sk_err = err;
210 /*
211 * Wake people up to see the error
212 * (see connect in sock.c)
213 */
214 sk->sk_error_report(sk);
215 dccp_done(sk);
216 } else
217 sk->sk_err_soft = err;
218 goto out;
219 }
220
221 if (!sock_owned_by_user(sk) && np->recverr) {
222 sk->sk_err = err;
223 sk->sk_error_report(sk);
224 } else
225 sk->sk_err_soft = err;
226
227 out:
228 bh_unlock_sock(sk);
229 sock_put(sk);
230 }
231
232
233 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
234 struct dst_entry *dst)
235 {
236 struct inet6_request_sock *ireq6 = inet6_rsk(req);
237 struct ipv6_pinfo *np = inet6_sk(sk);
238 struct sk_buff *skb;
239 struct ipv6_txoptions *opt = NULL;
240 struct in6_addr *final_p = NULL, final;
241 struct flowi fl;
242 int err = -1;
243
244 memset(&fl, 0, sizeof(fl));
245 fl.proto = IPPROTO_DCCP;
246 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
247 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
248 fl.fl6_flowlabel = 0;
249 fl.oif = ireq6->iif;
250 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
251 fl.fl_ip_sport = inet_sk(sk)->sport;
252 security_req_classify_flow(req, &fl);
253
254 if (dst == NULL) {
255 opt = np->opt;
256 if (opt == NULL &&
257 np->rxopt.bits.osrcrt == 2 &&
258 ireq6->pktopts) {
259 struct sk_buff *pktopts = ireq6->pktopts;
260 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
261
262 if (rxopt->srcrt)
263 opt = ipv6_invert_rthdr(sk,
264 (struct ipv6_rt_hdr *)(skb_network_header(pktopts) +
265 rxopt->srcrt));
266 }
267
268 if (opt != NULL && opt->srcrt != NULL) {
269 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
270
271 ipv6_addr_copy(&final, &fl.fl6_dst);
272 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
273 final_p = &final;
274 }
275
276 err = ip6_dst_lookup(sk, &dst, &fl);
277 if (err)
278 goto done;
279
280 if (final_p)
281 ipv6_addr_copy(&fl.fl6_dst, final_p);
282
283 err = xfrm_lookup(&dst, &fl, sk, 0);
284 if (err < 0)
285 goto done;
286 }
287
288 skb = dccp_make_response(sk, dst, req);
289 if (skb != NULL) {
290 struct dccp_hdr *dh = dccp_hdr(skb);
291
292 dh->dccph_checksum = dccp_v6_csum_finish(skb,
293 &ireq6->loc_addr,
294 &ireq6->rmt_addr);
295 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
296 err = ip6_xmit(sk, skb, &fl, opt, 0);
297 err = net_xmit_eval(err);
298 }
299
300 done:
301 if (opt != NULL && opt != np->opt)
302 sock_kfree_s(sk, opt, opt->tot_len);
303 dst_release(dst);
304 return err;
305 }
306
307 static void dccp_v6_reqsk_destructor(struct request_sock *req)
308 {
309 if (inet6_rsk(req)->pktopts != NULL)
310 kfree_skb(inet6_rsk(req)->pktopts);
311 }
312
313 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
314 {
315 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
316 struct ipv6hdr *rxip6h;
317 const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
318 sizeof(struct dccp_hdr_ext) +
319 sizeof(struct dccp_hdr_reset);
320 struct sk_buff *skb;
321 struct flowi fl;
322 u64 seqno = 0;
323
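	/* Never answer a Reset with another Reset, and only reply to packets
	 * that were sent to a unicast destination. */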
324 if (rxdh->dccph_type == DCCP_PKT_RESET)
325 return;
326
327 if (!ipv6_unicast_destination(rxskb))
328 return;
329
330 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
331 GFP_ATOMIC);
332 if (skb == NULL)
333 return;
334
335 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
336
337 dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
338
339 /* Swap the send and the receive. */
340 dh->dccph_type = DCCP_PKT_RESET;
341 dh->dccph_sport = rxdh->dccph_dport;
342 dh->dccph_dport = rxdh->dccph_sport;
343 dh->dccph_doff = dccp_hdr_reset_len / 4;
344 dh->dccph_x = 1;
345 dccp_hdr_reset(skb)->dccph_reset_code =
346 DCCP_SKB_CB(rxskb)->dccpd_reset_code;
347
348 /* See "8.3.1. Abnormal Termination" in RFC 4340 */
349 if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
350 dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
351
352 dccp_hdr_set_seq(dh, seqno);
353 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq);
354
355 dccp_csum_outgoing(skb);
356 rxip6h = ipv6_hdr(rxskb);
357 dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
358 &rxip6h->daddr);
359
360 memset(&fl, 0, sizeof(fl));
361 ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
362 ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);
363
364 fl.proto = IPPROTO_DCCP;
365 fl.oif = inet6_iif(rxskb);
366 fl.fl_ip_dport = dh->dccph_dport;
367 fl.fl_ip_sport = dh->dccph_sport;
368 security_skb_classify_flow(rxskb, &fl);
369
 370 /* sk = NULL, but that is safe for now; a proper RST socket is still required. */
371 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
372 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
373 ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
374 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
375 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
376 return;
377 }
378 }
379
380 kfree_skb(skb);
381 }
382
383 static struct request_sock_ops dccp6_request_sock_ops = {
384 .family = AF_INET6,
385 .obj_size = sizeof(struct dccp6_request_sock),
386 .rtx_syn_ack = dccp_v6_send_response,
387 .send_ack = dccp_reqsk_send_ack,
388 .destructor = dccp_v6_reqsk_destructor,
389 .send_reset = dccp_v6_ctl_send_reset,
390 };
391
392 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
393 {
394 const struct dccp_hdr *dh = dccp_hdr(skb);
395 const struct ipv6hdr *iph = ipv6_hdr(skb);
396 struct sock *nsk;
397 struct request_sock **prev;
398 /* Find possible connection requests. */
399 struct request_sock *req = inet6_csk_search_req(sk, &prev,
400 dh->dccph_sport,
401 &iph->saddr,
402 &iph->daddr,
403 inet6_iif(skb));
404 if (req != NULL)
405 return dccp_check_req(sk, skb, req, prev);
406
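	/* No pending connection request matched; the packet may instead belong
	 * to an already established (or TIMEWAIT) socket, so look it up
	 * directly in the established table. */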
407 nsk = __inet6_lookup_established(&dccp_hashinfo,
408 &iph->saddr, dh->dccph_sport,
409 &iph->daddr, ntohs(dh->dccph_dport),
410 inet6_iif(skb));
411 if (nsk != NULL) {
412 if (nsk->sk_state != DCCP_TIME_WAIT) {
413 bh_lock_sock(nsk);
414 return nsk;
415 }
416 inet_twsk_put(inet_twsk(nsk));
417 return NULL;
418 }
419
420 return sk;
421 }
422
423 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
424 {
425 struct request_sock *req;
426 struct dccp_request_sock *dreq;
427 struct inet6_request_sock *ireq6;
428 struct ipv6_pinfo *np = inet6_sk(sk);
429 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
430 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
431 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
432
433 if (skb->protocol == htons(ETH_P_IP))
434 return dccp_v4_conn_request(sk, skb);
435
436 if (!ipv6_unicast_destination(skb))
437 goto drop;
438
439 if (dccp_bad_service_code(sk, service)) {
440 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
441 goto drop;
442 }
443 /*
444 * There are no SYN attacks on IPv6, yet...
445 */
446 if (inet_csk_reqsk_queue_is_full(sk))
447 goto drop;
448
449 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
450 goto drop;
451
452 req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
453 if (req == NULL)
454 goto drop;
455
456 if (dccp_parse_options(sk, skb))
457 goto drop_and_free;
458
459 dccp_reqsk_init(req, skb);
460
461 if (security_inet_conn_request(sk, skb, req))
462 goto drop_and_free;
463
464 ireq6 = inet6_rsk(req);
465 ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
466 ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
467 ireq6->pktopts = NULL;
468
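	/* If the listener wants any of the IPv6 packet options, hold a
	 * reference to this skb so the options can be passed on to the child
	 * socket later. */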
469 if (ipv6_opt_accepted(sk, skb) ||
470 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
471 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
472 atomic_inc(&skb->users);
473 ireq6->pktopts = skb;
474 }
475 ireq6->iif = sk->sk_bound_dev_if;
476
477 /* So that link locals have meaning */
478 if (!sk->sk_bound_dev_if &&
479 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
480 ireq6->iif = inet6_iif(skb);
481
482 /*
483 * Step 3: Process LISTEN state
484 *
485 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
486 *
487 * In fact we defer setting S.GSR, S.SWL, S.SWH to
488 * dccp_create_openreq_child.
489 */
490 dreq = dccp_rsk(req);
491 dreq->dreq_isr = dcb->dccpd_seq;
492 dreq->dreq_iss = dccp_v6_init_sequence(skb);
493 dreq->dreq_service = service;
494
495 if (dccp_v6_send_response(sk, req, NULL))
496 goto drop_and_free;
497
498 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
499 return 0;
500
501 drop_and_free:
502 reqsk_free(req);
503 drop:
504 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
505 dcb->dccpd_reset_code = reset_code;
506 return -1;
507 }
508
509 static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
510 struct sk_buff *skb,
511 struct request_sock *req,
512 struct dst_entry *dst)
513 {
514 struct inet6_request_sock *ireq6 = inet6_rsk(req);
515 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
516 struct inet_sock *newinet;
517 struct dccp_sock *newdp;
518 struct dccp6_sock *newdp6;
519 struct sock *newsk;
520 struct ipv6_txoptions *opt;
521
522 if (skb->protocol == htons(ETH_P_IP)) {
523 /*
524 * v6 mapped
525 */
526 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
527 if (newsk == NULL)
528 return NULL;
529
530 newdp6 = (struct dccp6_sock *)newsk;
531 newdp = dccp_sk(newsk);
532 newinet = inet_sk(newsk);
533 newinet->pinet6 = &newdp6->inet6;
534 newnp = inet6_sk(newsk);
535
536 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
537
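		/* Store the IPv4 endpoints as IPv4-mapped IPv6 addresses
		 * (::ffff:a.b.c.d). */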
538 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
539 newinet->daddr);
540
541 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
542 newinet->saddr);
543
544 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
545
546 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
547 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
548 newnp->pktoptions = NULL;
549 newnp->opt = NULL;
550 newnp->mcast_oif = inet6_iif(skb);
551 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
552
553 /*
554 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
555 * here, dccp_create_openreq_child now does this for us, see the comment in
556 * that function for the gory details. -acme
557 */
558
 559 /* This is a tricky place. Until this moment the IPv4 code
 560 worked with the IPv6 icsk.icsk_af_ops.
 561 Sync it now.
 562 */
563 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
564
565 return newsk;
566 }
567
568 opt = np->opt;
569
570 if (sk_acceptq_is_full(sk))
571 goto out_overflow;
572
573 if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
574 const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
575
576 if (rxopt->srcrt)
577 opt = ipv6_invert_rthdr(sk,
578 (struct ipv6_rt_hdr *)(skb_network_header(ireq6->pktopts) +
579 rxopt->srcrt));
580 }
581
582 if (dst == NULL) {
583 struct in6_addr *final_p = NULL, final;
584 struct flowi fl;
585
586 memset(&fl, 0, sizeof(fl));
587 fl.proto = IPPROTO_DCCP;
588 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
589 if (opt != NULL && opt->srcrt != NULL) {
590 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
591
592 ipv6_addr_copy(&final, &fl.fl6_dst);
593 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
594 final_p = &final;
595 }
596 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
597 fl.oif = sk->sk_bound_dev_if;
598 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
599 fl.fl_ip_sport = inet_sk(sk)->sport;
600 security_sk_classify_flow(sk, &fl);
601
602 if (ip6_dst_lookup(sk, &dst, &fl))
603 goto out;
604
605 if (final_p)
606 ipv6_addr_copy(&fl.fl6_dst, final_p);
607
608 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
609 goto out;
610 }
611
612 newsk = dccp_create_openreq_child(sk, req, skb);
613 if (newsk == NULL)
614 goto out;
615
616 /*
617 * No need to charge this sock to the relevant IPv6 refcnt debug socks
618 * count here, dccp_create_openreq_child now does this for us, see the
619 * comment in that function for the gory details. -acme
620 */
621
622 __ip6_dst_store(newsk, dst, NULL, NULL);
623 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
624 NETIF_F_TSO);
625 newdp6 = (struct dccp6_sock *)newsk;
626 newinet = inet_sk(newsk);
627 newinet->pinet6 = &newdp6->inet6;
628 newdp = dccp_sk(newsk);
629 newnp = inet6_sk(newsk);
630
631 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
632
633 ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
634 ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
635 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
636 newsk->sk_bound_dev_if = ireq6->iif;
637
638 /* Now IPv6 options...
639
640 First: no IPv4 options.
641 */
642 newinet->opt = NULL;
643
644 /* Clone RX bits */
645 newnp->rxopt.all = np->rxopt.all;
646
647 /* Clone pktoptions received with SYN */
648 newnp->pktoptions = NULL;
649 if (ireq6->pktopts != NULL) {
650 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
651 kfree_skb(ireq6->pktopts);
652 ireq6->pktopts = NULL;
653 if (newnp->pktoptions)
654 skb_set_owner_r(newnp->pktoptions, newsk);
655 }
656 newnp->opt = NULL;
657 newnp->mcast_oif = inet6_iif(skb);
658 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
659
660 /*
661 * Clone native IPv6 options from listening socket (if any)
662 *
 663 * Yes, keeping a reference count would be much cleverer, but we do one
 664 * more thing here: reattach optmem to newsk.
665 */
666 if (opt != NULL) {
667 newnp->opt = ipv6_dup_options(newsk, opt);
668 if (opt != np->opt)
669 sock_kfree_s(sk, opt, opt->tot_len);
670 }
671
672 inet_csk(newsk)->icsk_ext_hdr_len = 0;
673 if (newnp->opt != NULL)
674 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
675 newnp->opt->opt_flen);
676
677 dccp_sync_mss(newsk, dst_mtu(dst));
678
679 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
680
681 __inet6_hash(&dccp_hashinfo, newsk);
682 inet_inherit_port(&dccp_hashinfo, sk, newsk);
683
684 return newsk;
685
686 out_overflow:
687 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
688 out:
689 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
690 if (opt != NULL && opt != np->opt)
691 sock_kfree_s(sk, opt, opt->tot_len);
692 dst_release(dst);
693 return NULL;
694 }
695
 696 /* The socket must have its spinlock held when we get
697 * here.
698 *
699 * We have a potential double-lock case here, so even when
700 * doing backlog processing we use the BH locking scheme.
701 * This is because we cannot sleep with the original spinlock
702 * held.
703 */
704 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
705 {
706 struct ipv6_pinfo *np = inet6_sk(sk);
707 struct sk_buff *opt_skb = NULL;
708
 709 /* Imagine: the socket is IPv6, but an IPv4 packet arrives,
 710 goes to the IPv4 receive handler and is backlogged.
 711 From the backlog it always ends up here. Kerboom...
 712 Fortunately, dccp_rcv_established and rcv_established
 713 handle this correctly, but that is not the case with
 714 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
 715 */
716
717 if (skb->protocol == htons(ETH_P_IP))
718 return dccp_v4_do_rcv(sk, skb);
719
720 if (sk_filter(sk, skb))
721 goto discard;
722
723 /*
724 * socket locking is here for SMP purposes as backlog rcv is currently
725 * called with bh processing disabled.
726 */
727
 728 /* Do Stevens' IPV6_PKTOPTIONS.
 729 
 730 Yes, this is the only place in our code where we
 731 can make it not affect IPv4.
 732 The rest of the code is protocol independent,
 733 and I do not like the idea of uglifying IPv4.
 734 
 735 Actually, the whole idea behind IPV6_PKTOPTIONS
 736 does not look very well thought out. For now we latch
 737 the options received in the last packet enqueued
 738 by tcp. Feel free to propose a better solution.
 739 --ANK (980728)
 740 */
741 if (np->rxopt.all)
742 /*
743 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
 744 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
745 */
746 opt_skb = skb_clone(skb, GFP_ATOMIC);
747
748 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
749 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
750 goto reset;
751 if (opt_skb) {
752 /* XXX This is where we would goto ipv6_pktoptions. */
753 __kfree_skb(opt_skb);
754 }
755 return 0;
756 }
757
758 /*
759 * Step 3: Process LISTEN state
760 * If S.state == LISTEN,
761 * If P.type == Request or P contains a valid Init Cookie option,
762 * (* Must scan the packet's options to check for Init
763 * Cookies. Only Init Cookies are processed here,
764 * however; other options are processed in Step 8. This
765 * scan need only be performed if the endpoint uses Init
766 * Cookies *)
767 * (* Generate a new socket and switch to that socket *)
768 * Set S := new socket for this port pair
769 * S.state = RESPOND
770 * Choose S.ISS (initial seqno) or set from Init Cookies
771 * Initialize S.GAR := S.ISS
772 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
773 * Continue with S.state == RESPOND
774 * (* A Response packet will be generated in Step 11 *)
775 * Otherwise,
776 * Generate Reset(No Connection) unless P.type == Reset
777 * Drop packet and return
778 *
779 * NOTE: the check for the packet types is done in
780 * dccp_rcv_state_process
781 */
782 if (sk->sk_state == DCCP_LISTEN) {
783 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
784
785 if (nsk == NULL)
786 goto discard;
787 /*
788 * Queue it on the new socket if the new socket is active,
789 * otherwise we just shortcircuit this and continue with
790 * the new socket..
791 */
792 if (nsk != sk) {
793 if (dccp_child_process(sk, nsk, skb))
794 goto reset;
795 if (opt_skb != NULL)
796 __kfree_skb(opt_skb);
797 return 0;
798 }
799 }
800
801 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
802 goto reset;
803 if (opt_skb) {
804 /* XXX This is where we would goto ipv6_pktoptions. */
805 __kfree_skb(opt_skb);
806 }
807 return 0;
808
809 reset:
810 dccp_v6_ctl_send_reset(sk, skb);
811 discard:
812 if (opt_skb != NULL)
813 __kfree_skb(opt_skb);
814 kfree_skb(skb);
815 return 0;
816 }
817
818 static int dccp_v6_rcv(struct sk_buff **pskb)
819 {
820 const struct dccp_hdr *dh;
821 struct sk_buff *skb = *pskb;
822 struct sock *sk;
823 int min_cov;
824
825 /* Step 1: Check header basics */
826
827 if (dccp_invalid_packet(skb))
828 goto discard_it;
829
830 /* Step 1: If header checksum is incorrect, drop packet and return. */
831 if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
832 &ipv6_hdr(skb)->daddr)) {
833 DCCP_WARN("dropped packet with invalid checksum\n");
834 goto discard_it;
835 }
836
837 dh = dccp_hdr(skb);
838
839 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
840 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
841
842 if (dccp_packet_without_ack(skb))
843 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
844 else
845 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
846
847 /* Step 2:
848 * Look up flow ID in table and get corresponding socket */
849 sk = __inet6_lookup(&dccp_hashinfo, &ipv6_hdr(skb)->saddr,
850 dh->dccph_sport,
851 &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport),
852 inet6_iif(skb));
853 /*
854 * Step 2:
855 * If no socket ...
856 */
857 if (sk == NULL) {
858 dccp_pr_debug("failed to look up flow ID in table and "
859 "get corresponding socket\n");
860 goto no_dccp_socket;
861 }
862
863 /*
864 * Step 2:
865 * ... or S.state == TIMEWAIT,
866 * Generate Reset(No Connection) unless P.type == Reset
867 * Drop packet and return
868 */
869 if (sk->sk_state == DCCP_TIME_WAIT) {
870 dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
871 inet_twsk_put(inet_twsk(sk));
872 goto no_dccp_socket;
873 }
874
875 /*
876 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
877 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
878 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
879 */
880 min_cov = dccp_sk(sk)->dccps_pcrlen;
881 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
882 dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
883 dh->dccph_cscov, min_cov);
884 /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
885 goto discard_and_relse;
886 }
887
888 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
889 goto discard_and_relse;
890
891 return sk_receive_skb(sk, skb, 1) ? -1 : 0;
892
893 no_dccp_socket:
894 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
895 goto discard_it;
896 /*
897 * Step 2:
898 * If no socket ...
899 * Generate Reset(No Connection) unless P.type == Reset
900 * Drop packet and return
901 */
902 if (dh->dccph_type != DCCP_PKT_RESET) {
903 DCCP_SKB_CB(skb)->dccpd_reset_code =
904 DCCP_RESET_CODE_NO_CONNECTION;
905 dccp_v6_ctl_send_reset(sk, skb);
906 }
907
908 discard_it:
909 kfree_skb(skb);
910 return 0;
911
912 discard_and_relse:
913 sock_put(sk);
914 goto discard_it;
915 }
916
917 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
918 int addr_len)
919 {
920 struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
921 struct inet_connection_sock *icsk = inet_csk(sk);
922 struct inet_sock *inet = inet_sk(sk);
923 struct ipv6_pinfo *np = inet6_sk(sk);
924 struct dccp_sock *dp = dccp_sk(sk);
925 struct in6_addr *saddr = NULL, *final_p = NULL, final;
926 struct flowi fl;
927 struct dst_entry *dst;
928 int addr_type;
929 int err;
930
931 dp->dccps_role = DCCP_ROLE_CLIENT;
932
933 if (addr_len < SIN6_LEN_RFC2133)
934 return -EINVAL;
935
936 if (usin->sin6_family != AF_INET6)
937 return -EAFNOSUPPORT;
938
939 memset(&fl, 0, sizeof(fl));
940
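	/* If the socket sends flow labels, take the label from sin6_flowinfo
	 * and, when one is set, resolve it through the socket's flow label
	 * list to pick up the associated destination address. */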
941 if (np->sndflow) {
942 fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
943 IP6_ECN_flow_init(fl.fl6_flowlabel);
944 if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
945 struct ip6_flowlabel *flowlabel;
946 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
947 if (flowlabel == NULL)
948 return -EINVAL;
949 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
950 fl6_sock_release(flowlabel);
951 }
952 }
953 /*
954 * connect() to INADDR_ANY means loopback (BSD'ism).
955 */
956 if (ipv6_addr_any(&usin->sin6_addr))
957 usin->sin6_addr.s6_addr[15] = 1;
958
959 addr_type = ipv6_addr_type(&usin->sin6_addr);
960
961 if (addr_type & IPV6_ADDR_MULTICAST)
962 return -ENETUNREACH;
963
964 if (addr_type & IPV6_ADDR_LINKLOCAL) {
965 if (addr_len >= sizeof(struct sockaddr_in6) &&
966 usin->sin6_scope_id) {
967 /* If interface is set while binding, indices
968 * must coincide.
969 */
970 if (sk->sk_bound_dev_if &&
971 sk->sk_bound_dev_if != usin->sin6_scope_id)
972 return -EINVAL;
973
974 sk->sk_bound_dev_if = usin->sin6_scope_id;
975 }
976
977 /* Connect to link-local address requires an interface */
978 if (!sk->sk_bound_dev_if)
979 return -EINVAL;
980 }
981
982 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
983 np->flow_label = fl.fl6_flowlabel;
984
985 /*
986 * DCCP over IPv4
987 */
988 if (addr_type == IPV6_ADDR_MAPPED) {
989 u32 exthdrlen = icsk->icsk_ext_hdr_len;
990 struct sockaddr_in sin;
991
992 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
993
994 if (__ipv6_only_sock(sk))
995 return -ENETUNREACH;
996
997 sin.sin_family = AF_INET;
998 sin.sin_port = usin->sin6_port;
999 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
1000
1001 icsk->icsk_af_ops = &dccp_ipv6_mapped;
1002 sk->sk_backlog_rcv = dccp_v4_do_rcv;
1003
1004 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
1005 if (err) {
1006 icsk->icsk_ext_hdr_len = exthdrlen;
1007 icsk->icsk_af_ops = &dccp_ipv6_af_ops;
1008 sk->sk_backlog_rcv = dccp_v6_do_rcv;
1009 goto failure;
1010 } else {
1011 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
1012 inet->saddr);
1013 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
1014 inet->rcv_saddr);
1015 }
1016
1017 return err;
1018 }
1019
1020 if (!ipv6_addr_any(&np->rcv_saddr))
1021 saddr = &np->rcv_saddr;
1022
1023 fl.proto = IPPROTO_DCCP;
1024 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1025 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
1026 fl.oif = sk->sk_bound_dev_if;
1027 fl.fl_ip_dport = usin->sin6_port;
1028 fl.fl_ip_sport = inet->sport;
1029 security_sk_classify_flow(sk, &fl);
1030
1031 if (np->opt != NULL && np->opt->srcrt != NULL) {
1032 const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
1033
1034 ipv6_addr_copy(&final, &fl.fl6_dst);
1035 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1036 final_p = &final;
1037 }
1038
1039 err = ip6_dst_lookup(sk, &dst, &fl);
1040 if (err)
1041 goto failure;
1042
1043 if (final_p)
1044 ipv6_addr_copy(&fl.fl6_dst, final_p);
1045
1046 err = xfrm_lookup(&dst, &fl, sk, 1);
1047 if (err < 0)
1048 goto failure;
1049
1050 if (saddr == NULL) {
1051 saddr = &fl.fl6_src;
1052 ipv6_addr_copy(&np->rcv_saddr, saddr);
1053 }
1054
1055 /* set the source address */
1056 ipv6_addr_copy(&np->saddr, saddr);
1057 inet->rcv_saddr = LOOPBACK4_IPV6;
1058
1059 __ip6_dst_store(sk, dst, NULL, NULL);
1060
1061 icsk->icsk_ext_hdr_len = 0;
1062 if (np->opt != NULL)
1063 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
1064 np->opt->opt_nflen);
1065
1066 inet->dport = usin->sin6_port;
1067
1068 dccp_set_state(sk, DCCP_REQUESTING);
1069 err = inet6_hash_connect(&dccp_death_row, sk);
1070 if (err)
1071 goto late_failure;
1072
1073 dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
1074 np->daddr.s6_addr32,
1075 inet->sport, inet->dport);
1076 err = dccp_connect(sk);
1077 if (err)
1078 goto late_failure;
1079
1080 return 0;
1081
1082 late_failure:
1083 dccp_set_state(sk, DCCP_CLOSED);
1084 __sk_dst_reset(sk);
1085 failure:
1086 inet->dport = 0;
1087 sk->sk_route_caps = 0;
1088 return err;
1089 }
1090
1091 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1092 .queue_xmit = inet6_csk_xmit,
1093 .send_check = dccp_v6_send_check,
1094 .rebuild_header = inet6_sk_rebuild_header,
1095 .conn_request = dccp_v6_conn_request,
1096 .syn_recv_sock = dccp_v6_request_recv_sock,
1097 .net_header_len = sizeof(struct ipv6hdr),
1098 .setsockopt = ipv6_setsockopt,
1099 .getsockopt = ipv6_getsockopt,
1100 .addr2sockaddr = inet6_csk_addr2sockaddr,
1101 .sockaddr_len = sizeof(struct sockaddr_in6),
1102 #ifdef CONFIG_COMPAT
1103 .compat_setsockopt = compat_ipv6_setsockopt,
1104 .compat_getsockopt = compat_ipv6_getsockopt,
1105 #endif
1106 };
1107
1108 /*
1109 * DCCP over IPv4 via INET6 API
1110 */
1111 static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1112 .queue_xmit = ip_queue_xmit,
1113 .send_check = dccp_v4_send_check,
1114 .rebuild_header = inet_sk_rebuild_header,
1115 .conn_request = dccp_v6_conn_request,
1116 .syn_recv_sock = dccp_v6_request_recv_sock,
1117 .net_header_len = sizeof(struct iphdr),
1118 .setsockopt = ipv6_setsockopt,
1119 .getsockopt = ipv6_getsockopt,
1120 .addr2sockaddr = inet6_csk_addr2sockaddr,
1121 .sockaddr_len = sizeof(struct sockaddr_in6),
1122 #ifdef CONFIG_COMPAT
1123 .compat_setsockopt = compat_ipv6_setsockopt,
1124 .compat_getsockopt = compat_ipv6_getsockopt,
1125 #endif
1126 };
1127
 1128 /* NOTE: A lot of things are set to zero explicitly by the call to
 1129 * sk_alloc(), so they need not be done here.
1130 */
1131 static int dccp_v6_init_sock(struct sock *sk)
1132 {
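	/* The very first DCCPv6 socket created becomes the control socket; the
	 * flag below lets dccp_init_sock() give it the reduced setup it needs
	 * (the control socket does not take part in feature negotiation). */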
1133 static __u8 dccp_v6_ctl_sock_initialized;
1134 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1135
1136 if (err == 0) {
1137 if (unlikely(!dccp_v6_ctl_sock_initialized))
1138 dccp_v6_ctl_sock_initialized = 1;
1139 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1140 }
1141
1142 return err;
1143 }
1144
1145 static int dccp_v6_destroy_sock(struct sock *sk)
1146 {
1147 dccp_destroy_sock(sk);
1148 return inet6_destroy_sock(sk);
1149 }
1150
1151 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
1152 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
1153 };
1154
1155 static struct proto dccp_v6_prot = {
1156 .name = "DCCPv6",
1157 .owner = THIS_MODULE,
1158 .close = dccp_close,
1159 .connect = dccp_v6_connect,
1160 .disconnect = dccp_disconnect,
1161 .ioctl = dccp_ioctl,
1162 .init = dccp_v6_init_sock,
1163 .setsockopt = dccp_setsockopt,
1164 .getsockopt = dccp_getsockopt,
1165 .sendmsg = dccp_sendmsg,
1166 .recvmsg = dccp_recvmsg,
1167 .backlog_rcv = dccp_v6_do_rcv,
1168 .hash = dccp_v6_hash,
1169 .unhash = dccp_unhash,
1170 .accept = inet_csk_accept,
1171 .get_port = dccp_v6_get_port,
1172 .shutdown = dccp_shutdown,
1173 .destroy = dccp_v6_destroy_sock,
1174 .orphan_count = &dccp_orphan_count,
1175 .max_header = MAX_DCCP_HEADER,
1176 .obj_size = sizeof(struct dccp6_sock),
1177 .rsk_prot = &dccp6_request_sock_ops,
1178 .twsk_prot = &dccp6_timewait_sock_ops,
1179 #ifdef CONFIG_COMPAT
1180 .compat_setsockopt = compat_dccp_setsockopt,
1181 .compat_getsockopt = compat_dccp_getsockopt,
1182 #endif
1183 };
1184
1185 static struct inet6_protocol dccp_v6_protocol = {
1186 .handler = dccp_v6_rcv,
1187 .err_handler = dccp_v6_err,
1188 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1189 };
1190
1191 static struct proto_ops inet6_dccp_ops = {
1192 .family = PF_INET6,
1193 .owner = THIS_MODULE,
1194 .release = inet6_release,
1195 .bind = inet6_bind,
1196 .connect = inet_stream_connect,
1197 .socketpair = sock_no_socketpair,
1198 .accept = inet_accept,
1199 .getname = inet6_getname,
1200 .poll = dccp_poll,
1201 .ioctl = inet6_ioctl,
1202 .listen = inet_dccp_listen,
1203 .shutdown = inet_shutdown,
1204 .setsockopt = sock_common_setsockopt,
1205 .getsockopt = sock_common_getsockopt,
1206 .sendmsg = inet_sendmsg,
1207 .recvmsg = sock_common_recvmsg,
1208 .mmap = sock_no_mmap,
1209 .sendpage = sock_no_sendpage,
1210 #ifdef CONFIG_COMPAT
1211 .compat_setsockopt = compat_sock_common_setsockopt,
1212 .compat_getsockopt = compat_sock_common_getsockopt,
1213 #endif
1214 };
1215
1216 static struct inet_protosw dccp_v6_protosw = {
1217 .type = SOCK_DCCP,
1218 .protocol = IPPROTO_DCCP,
1219 .prot = &dccp_v6_prot,
1220 .ops = &inet6_dccp_ops,
1221 .capability = -1,
1222 .flags = INET_PROTOSW_ICSK,
1223 };
1224
1225 static int __init dccp_v6_init(void)
1226 {
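	/* Register in dependency order: the proto, the inet6 protocol handler,
	 * the protosw entry, and finally the control socket used for sending
	 * Resets; unwind in the reverse order on failure. */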
1227 int err = proto_register(&dccp_v6_prot, 1);
1228
1229 if (err != 0)
1230 goto out;
1231
1232 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1233 if (err != 0)
1234 goto out_unregister_proto;
1235
1236 inet6_register_protosw(&dccp_v6_protosw);
1237
1238 err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
1239 SOCK_DCCP, IPPROTO_DCCP);
1240 if (err != 0)
1241 goto out_unregister_protosw;
1242 out:
1243 return err;
1244 out_unregister_protosw:
1245 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1246 inet6_unregister_protosw(&dccp_v6_protosw);
1247 out_unregister_proto:
1248 proto_unregister(&dccp_v6_prot);
1249 goto out;
1250 }
1251
1252 static void __exit dccp_v6_exit(void)
1253 {
1254 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1255 inet6_unregister_protosw(&dccp_v6_protosw);
1256 proto_unregister(&dccp_v6_prot);
1257 }
1258
1259 module_init(dccp_v6_init);
1260 module_exit(dccp_v6_exit);
1261
1262 /*
 1263 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
 1264 * values directly. Also cover the case where the protocol is not specified,
1265 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1266 */
1267 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
1268 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
1269 MODULE_LICENSE("GPL");
1270 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1271 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");