[TCP]: Move the tcp sock states to net/tcp_states.h
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv6 / raw.c
1 /*
2 * RAW sockets for IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Adapted from linux/net/ipv4/raw.c
9 *
10 * $Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
15 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 */
22
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/bug.h>
37 #include <asm/bug.h>
38
39 #include <net/ip.h>
40 #include <net/sock.h>
41 #include <net/snmp.h>
42
43 #include <net/ipv6.h>
44 #include <net/ndisc.h>
45 #include <net/protocol.h>
46 #include <net/ip6_route.h>
47 #include <net/ip6_checksum.h>
48 #include <net/addrconf.h>
49 #include <net/transp_v6.h>
50 #include <net/udp.h>
51 #include <net/inet_common.h>
52 #include <net/tcp_states.h>
53
54 #include <net/rawv6.h>
55 #include <net/xfrm.h>
56
57 #include <linux/proc_fs.h>
58 #include <linux/seq_file.h>
59
/* Hash table of all bound raw IPv6 sockets, bucketed by protocol number. */
struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
/* Protects raw_v6_htable; read-held on the receive demux paths. */
DEFINE_RWLOCK(raw_v6_lock);
62
63 static void raw_v6_hash(struct sock *sk)
64 {
65 struct hlist_head *list = &raw_v6_htable[inet_sk(sk)->num &
66 (RAWV6_HTABLE_SIZE - 1)];
67
68 write_lock_bh(&raw_v6_lock);
69 sk_add_node(sk, list);
70 sock_prot_inc_use(sk->sk_prot);
71 write_unlock_bh(&raw_v6_lock);
72 }
73
/*
 * Remove @sk from the raw socket hash table and drop the protocol's
 * use count, if it was actually hashed.
 */
static void raw_v6_unhash(struct sock *sk)
{
	write_lock_bh(&raw_v6_lock);
	if (sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(&raw_v6_lock);
}
81
82
/* Grumble... icmp and ip_input want to get at this... */
/*
 * Walk the hash chain starting at @sk and return the first raw socket
 * bound to protocol @num that accepts a packet with local address
 * @loc_addr, remote address @rmt_addr, arriving on device @dif.
 *
 * Per-socket matching rules:
 *  - a connected socket (non-any np->daddr) must match @rmt_addr;
 *  - a device-bound socket must match @dif;
 *  - a locally-bound socket must match @loc_addr exactly, or, when the
 *    destination is multicast, have joined the group (inet6_mc_check).
 *
 * Caller must hold raw_v6_lock.  Returns NULL if nothing matches.
 */
struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
		struct in6_addr *loc_addr, struct in6_addr *rmt_addr,
		int dif)
{
	struct hlist_node *node;
	int is_multicast = ipv6_addr_is_multicast(loc_addr);

	sk_for_each_from(sk, node)
		if (inet_sk(sk)->num == num) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))
				continue;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
				continue;

			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
					goto found;
				if (is_multicast &&
				    inet6_mc_check(sk, loc_addr, rmt_addr))
					goto found;
				continue;
			}
			goto found;
		}
	sk = NULL;
found:
	return sk;
}
116
117 /*
118 * 0 - deliver
119 * 1 - block
120 */
121 static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
122 {
123 struct icmp6hdr *icmph;
124 struct raw6_sock *rp = raw6_sk(sk);
125
126 if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
127 __u32 *data = &rp->filter.data[0];
128 int bit_nr;
129
130 icmph = (struct icmp6hdr *) skb->data;
131 bit_nr = icmph->icmp6_type;
132
133 return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
134 }
135 return 0;
136 }
137
138 /*
139 * demultiplex raw sockets.
140 * (should consider queueing the skb in the sock receive_queue
141 * without calling rawv6.c)
142 *
143 * Caller owns SKB so we must make clones.
144 */
145 int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
146 {
147 struct in6_addr *saddr;
148 struct in6_addr *daddr;
149 struct sock *sk;
150 int delivered = 0;
151 __u8 hash;
152
153 saddr = &skb->nh.ipv6h->saddr;
154 daddr = saddr + 1;
155
156 hash = nexthdr & (MAX_INET_PROTOS - 1);
157
158 read_lock(&raw_v6_lock);
159 sk = sk_head(&raw_v6_htable[hash]);
160
161 /*
162 * The first socket found will be delivered after
163 * delivery to transport protocols.
164 */
165
166 if (sk == NULL)
167 goto out;
168
169 sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, skb->dev->ifindex);
170
171 while (sk) {
172 delivered = 1;
173 if (nexthdr != IPPROTO_ICMPV6 || !icmpv6_filter(sk, skb)) {
174 struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
175
176 /* Not releasing hash table! */
177 if (clone)
178 rawv6_rcv(sk, clone);
179 }
180 sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
181 skb->dev->ifindex);
182 }
183 out:
184 read_unlock(&raw_v6_lock);
185 return delivered;
186 }
187
/* This cleans up af_inet6 a bit. -DaveM */
/*
 * bind() for raw IPv6 sockets: record the local address (and scope
 * device for link-local addresses) after checking that it belongs to
 * this host.  Raw IPv6 sockets refuse v4-mapped addresses.
 */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__u32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;
	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return(-EADDRNOTAVAIL);

	lock_sock(sk);

	/* TCP_CLOSE is the generic "unbound/idle" state; rebinding an
	 * active socket is not allowed. */
	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (addr_type & IPV6_ADDR_LINKLOCAL) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out;

			/* Takes a reference; dropped on every exit below. */
			dev = dev_get_by_index(sk->sk_bound_dev_if);
			if (!dev) {
				err = -ENODEV;
				goto out;
			}
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST))	{
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(&addr->sin6_addr, dev, 0)) {
				if (dev)
					dev_put(dev);
				goto out;
			}
		}
		if (dev)
			dev_put(dev);
	}

	inet->rcv_saddr = inet->saddr = v4addr;
	ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
	err = 0;
out:
	release_sock(sk);
	return err;
}
261
/*
 * ICMPv6 error handler for raw sockets: queue the error for
 * IPV6_RECVERR readers and/or report it on the socket when the error
 * is hard or the socket is connected.
 */
void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       int type, int code, int offset, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and error is hard.
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	/* PMTU messages are only "hard" when path MTU discovery is on. */
	if (type == ICMPV6_PKT_TOOBIG)
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);

	if (np->recverr) {
		u8 *payload = skb->data;
		/* With IPV6_HDRINCL the user sees the IPv6 header too,
		 * so do not skip past it. */
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}
295
/*
 * Final delivery of one skb to a raw socket: verify the deferred
 * checksum if required (socket requested checksumming or has a BPF
 * filter attached), then charge the skb to the receive queue.
 * Always returns 0; failed packets are freed silently.
 */
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
	if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
	    skb->ip_summed != CHECKSUM_UNNECESSARY) {
		/* skb->csum holds the pseudo-header complement set up in
		 * rawv6_rcv(); a non-zero fold means corruption. */
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			/* FIXME: increment a raw6 drops counter here */
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk,skb)<0) {
		/* FIXME: increment a raw6 drops counter here */
		kfree_skb(skb);
		return 0;
	}

	return 0;
}
317
/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	/* IPsec policy check; drop packets the policy forbids. */
	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Socket did not enable IPV6_CHECKSUM: accept as-is. */
	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if (skb->ip_summed == CHECKSUM_HW) {
			/* Trim the hardware checksum to the transport
			 * payload and verify the pseudo-header sum now. */
			skb_postpull_rcsum(skb, skb->nh.raw,
			                   skb->h.raw - skb->nh.raw);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
					    &skb->nh.ipv6h->daddr,
					    skb->len, inet->num, skb->csum)) {
				LIMIT_NETDEBUG(
				printk(KERN_DEBUG "raw v6 hw csum failure.\n"));
				skb->ip_summed = CHECKSUM_NONE;
			}
		}
		if (skb->ip_summed == CHECKSUM_NONE)
			/* Pre-fold the pseudo-header complement so a later
			 * software verification needs only the payload. */
			skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
						     &skb->nh.ipv6h->daddr,
						     skb->len, inet->num, 0);
	}

	if (inet->hdrincl) {
		/* Header-including sockets verify up front and drop
		 * silently on checksum failure. */
		if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
		    (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			/* FIXME: increment a raw6 drops counter here */
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}
370
371
372 /*
373 * This should be easy, if there is something there
374 * we return it, otherwise we block.
375 */
376
377 static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
378 struct msghdr *msg, size_t len,
379 int noblock, int flags, int *addr_len)
380 {
381 struct ipv6_pinfo *np = inet6_sk(sk);
382 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
383 struct sk_buff *skb;
384 size_t copied;
385 int err;
386
387 if (flags & MSG_OOB)
388 return -EOPNOTSUPP;
389
390 if (addr_len)
391 *addr_len=sizeof(*sin6);
392
393 if (flags & MSG_ERRQUEUE)
394 return ipv6_recv_error(sk, msg, len);
395
396 skb = skb_recv_datagram(sk, flags, noblock, &err);
397 if (!skb)
398 goto out;
399
400 copied = skb->len;
401 if (copied > len) {
402 copied = len;
403 msg->msg_flags |= MSG_TRUNC;
404 }
405
406 if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
407 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
408 } else if (msg->msg_flags&MSG_TRUNC) {
409 if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
410 goto csum_copy_err;
411 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
412 } else {
413 err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
414 if (err == -EINVAL)
415 goto csum_copy_err;
416 }
417 if (err)
418 goto out_free;
419
420 /* Copy the address. */
421 if (sin6) {
422 sin6->sin6_family = AF_INET6;
423 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
424 sin6->sin6_flowinfo = 0;
425 sin6->sin6_scope_id = 0;
426 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
427 sin6->sin6_scope_id = IP6CB(skb)->iif;
428 }
429
430 sock_recv_timestamp(msg, sk, skb);
431
432 if (np->rxopt.all)
433 datagram_recv_ctl(sk, msg, skb);
434
435 err = copied;
436 if (flags & MSG_TRUNC)
437 err = skb->len;
438
439 out_free:
440 skb_free_datagram(sk, skb);
441 out:
442 return err;
443
444 csum_copy_err:
445 /* Clear queue. */
446 if (flags&MSG_PEEK) {
447 int clear = 0;
448 spin_lock_bh(&sk->sk_receive_queue.lock);
449 if (skb == skb_peek(&sk->sk_receive_queue)) {
450 __skb_unlink(skb, &sk->sk_receive_queue);
451 clear = 1;
452 }
453 spin_unlock_bh(&sk->sk_receive_queue.lock);
454 if (clear)
455 kfree_skb(skb);
456 }
457
458 /* Error for blocking case is chosen to masquerade
459 as some normal condition.
460 */
461 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
462 /* FIXME: increment a raw6 drops counter here */
463 goto out_free;
464 }
465
/*
 * Finish a packet built with ip6_append_data(): when the socket
 * requested raw checksumming (IPV6_CHECKSUM), compute the checksum
 * over all queued fragments and store it at rp->offset within the
 * transport payload, then hand the queue to ip6_push_pending_frames().
 */
static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
			struct raw6_sock *rp)
{
	struct sk_buff *skb;
	int err = 0;
	int offset;
	int len;
	int total_len;
	u32 tmp_csum;
	u16 csum;

	if (!rp->checksum)
		goto send;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	offset = rp->offset;
	total_len = inet_sk(sk)->cork.length - (skb->nh.raw - skb->data);
	/* The 16-bit checksum field must fit inside the payload. */
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should be check HW csum miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		/* Sum every fragment, remembering which fragment
		 * (csum_skb) contains the checksum field itself. */
		struct sk_buff *csum_skb = NULL;
		tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			len = skb->len - (skb->h.raw - skb->data);
			if (offset >= len) {
				offset -= len;
				continue;
			}

			csum_skb = skb;
		}

		skb = csum_skb;
	}

	offset += skb->h.raw - skb->data;
	if (skb_copy_bits(skb, offset, &csum, 2))
		BUG();

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum);

	tmp_csum = csum_ipv6_magic(&fl->fl6_src,
				   &fl->fl6_dst,
				   total_len, fl->proto, tmp_csum);

	/* A computed checksum of zero is transmitted as all-ones. */
	if (tmp_csum == 0)
		tmp_csum = -1;

	csum = tmp_csum;
	if (skb_store_bits(skb, offset, &csum, 2))
		BUG();

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
}
543
/*
 * Send a packet for an IPV6_HDRINCL socket: the user supplies the
 * complete IPv6 header in the iovec, so the skb is built verbatim
 * (no fragmentation; anything over the route MTU is rejected) and
 * passed straight to the netfilter LOCAL_OUT hook.
 */
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
			struct flowi *fl, struct rt6_info *rt,
			unsigned int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	unsigned int hh_len;
	int err;

	/* No fragmentation with a user-built header. */
	if (length > rt->u.dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	skb = sock_alloc_send_skb(sk, length+hh_len+15,
				  flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hh_len);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	skb->nh.ipv6h = iph = (struct ipv6hdr *)skb_put(skb, length);

	/* The kernel does not touch the user's checksum. */
	skb->ip_summed = CHECKSUM_NONE;

	skb->h.raw = skb->nh.raw;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		      dst_output);
	/* Positive return values from the stack are congestion codes;
	 * surface them only when the user asked for errors. */
	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
598
/*
 * For ICMPv6 sockets, peek at the first byte(s) of the user's iovec
 * to learn the ICMPv6 type and code, so flow-based lookups (routing,
 * xfrm policy) can key on them before the payload is copied for real.
 * For all other protocols there is nothing to probe.
 *
 * NOTE(review): the get_user()/__get_user() return values are ignored
 * here; on a faulting address the type/code stay unset and the fault
 * is caught later when the data is actually copied — confirm this is
 * the intended best-effort behaviour.
 */
static void rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
{
	struct iovec *iov;
	u8 __user *type = NULL;
	u8 __user *code = NULL;
	int probed = 0;
	int i;

	if (!msg->msg_iov)
		return;

	for (i = 0; i < msg->msg_iovlen; i++) {
		iov = &msg->msg_iov[i];
		if (!iov)
			continue;

		switch (fl->proto) {
		case IPPROTO_ICMPV6:
			/* check if one-byte field is readable or not. */
			if (iov->iov_base && iov->iov_len < 1)
				break;

			if (!type) {
				type = iov->iov_base;
				/* check if code field is readable or not. */
				if (iov->iov_len > 1)
					code = type + 1;
			} else if (!code)
				code = iov->iov_base;

			if (type && code) {
				get_user(fl->fl_icmp_type, type);
				__get_user(fl->fl_icmp_code, code);
				probed = 1;
			}
			break;
		default:
			probed = 1;
			break;
		}
		if (probed)
			break;
	}
}
643
644 static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
645 struct msghdr *msg, size_t len)
646 {
647 struct ipv6_txoptions opt_space;
648 struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
649 struct in6_addr *daddr, *final_p = NULL, final;
650 struct inet_sock *inet = inet_sk(sk);
651 struct ipv6_pinfo *np = inet6_sk(sk);
652 struct raw6_sock *rp = raw6_sk(sk);
653 struct ipv6_txoptions *opt = NULL;
654 struct ip6_flowlabel *flowlabel = NULL;
655 struct dst_entry *dst = NULL;
656 struct flowi fl;
657 int addr_len = msg->msg_namelen;
658 int hlimit = -1;
659 u16 proto;
660 int err;
661
662 /* Rough check on arithmetic overflow,
663 better check is made in ip6_build_xmit
664 */
665 if (len < 0)
666 return -EMSGSIZE;
667
668 /* Mirror BSD error message compatibility */
669 if (msg->msg_flags & MSG_OOB)
670 return -EOPNOTSUPP;
671
672 /*
673 * Get and verify the address.
674 */
675 memset(&fl, 0, sizeof(fl));
676
677 if (sin6) {
678 if (addr_len < SIN6_LEN_RFC2133)
679 return -EINVAL;
680
681 if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
682 return(-EAFNOSUPPORT);
683
684 /* port is the proto value [0..255] carried in nexthdr */
685 proto = ntohs(sin6->sin6_port);
686
687 if (!proto)
688 proto = inet->num;
689 else if (proto != inet->num)
690 return(-EINVAL);
691
692 if (proto > 255)
693 return(-EINVAL);
694
695 daddr = &sin6->sin6_addr;
696 if (np->sndflow) {
697 fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
698 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
699 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
700 if (flowlabel == NULL)
701 return -EINVAL;
702 daddr = &flowlabel->dst;
703 }
704 }
705
706 /*
707 * Otherwise it will be difficult to maintain
708 * sk->sk_dst_cache.
709 */
710 if (sk->sk_state == TCP_ESTABLISHED &&
711 ipv6_addr_equal(daddr, &np->daddr))
712 daddr = &np->daddr;
713
714 if (addr_len >= sizeof(struct sockaddr_in6) &&
715 sin6->sin6_scope_id &&
716 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
717 fl.oif = sin6->sin6_scope_id;
718 } else {
719 if (sk->sk_state != TCP_ESTABLISHED)
720 return -EDESTADDRREQ;
721
722 proto = inet->num;
723 daddr = &np->daddr;
724 fl.fl6_flowlabel = np->flow_label;
725 }
726
727 if (ipv6_addr_any(daddr)) {
728 /*
729 * unspecified destination address
730 * treated as error... is this correct ?
731 */
732 fl6_sock_release(flowlabel);
733 return(-EINVAL);
734 }
735
736 if (fl.oif == 0)
737 fl.oif = sk->sk_bound_dev_if;
738
739 if (msg->msg_controllen) {
740 opt = &opt_space;
741 memset(opt, 0, sizeof(struct ipv6_txoptions));
742 opt->tot_len = sizeof(struct ipv6_txoptions);
743
744 err = datagram_send_ctl(msg, &fl, opt, &hlimit);
745 if (err < 0) {
746 fl6_sock_release(flowlabel);
747 return err;
748 }
749 if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
750 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
751 if (flowlabel == NULL)
752 return -EINVAL;
753 }
754 if (!(opt->opt_nflen|opt->opt_flen))
755 opt = NULL;
756 }
757 if (opt == NULL)
758 opt = np->opt;
759 if (flowlabel)
760 opt = fl6_merge_options(&opt_space, flowlabel, opt);
761
762 fl.proto = proto;
763 rawv6_probe_proto_opt(&fl, msg);
764
765 ipv6_addr_copy(&fl.fl6_dst, daddr);
766 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
767 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
768
769 /* merge ip6_build_xmit from ip6_output */
770 if (opt && opt->srcrt) {
771 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
772 ipv6_addr_copy(&final, &fl.fl6_dst);
773 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
774 final_p = &final;
775 }
776
777 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
778 fl.oif = np->mcast_oif;
779
780 err = ip6_dst_lookup(sk, &dst, &fl);
781 if (err)
782 goto out;
783 if (final_p)
784 ipv6_addr_copy(&fl.fl6_dst, final_p);
785
786 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
787 dst_release(dst);
788 goto out;
789 }
790
791 if (hlimit < 0) {
792 if (ipv6_addr_is_multicast(&fl.fl6_dst))
793 hlimit = np->mcast_hops;
794 else
795 hlimit = np->hop_limit;
796 if (hlimit < 0)
797 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
798 if (hlimit < 0)
799 hlimit = ipv6_get_hoplimit(dst->dev);
800 }
801
802 if (msg->msg_flags&MSG_CONFIRM)
803 goto do_confirm;
804
805 back_from_confirm:
806 if (inet->hdrincl) {
807 err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags);
808 } else {
809 lock_sock(sk);
810 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
811 hlimit, opt, &fl, (struct rt6_info*)dst, msg->msg_flags);
812
813 if (err)
814 ip6_flush_pending_frames(sk);
815 else if (!(msg->msg_flags & MSG_MORE))
816 err = rawv6_push_pending_frames(sk, &fl, rp);
817 }
818 done:
819 ip6_dst_store(sk, dst,
820 ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
821 &np->daddr : NULL);
822
823 release_sock(sk);
824 out:
825 fl6_sock_release(flowlabel);
826 return err<0?err:len;
827 do_confirm:
828 dst_confirm(dst);
829 if (!(msg->msg_flags & MSG_PROBE) || len)
830 goto back_from_confirm;
831 err = 0;
832 goto done;
833 }
834
835 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
836 char __user *optval, int optlen)
837 {
838 switch (optname) {
839 case ICMPV6_FILTER:
840 if (optlen > sizeof(struct icmp6_filter))
841 optlen = sizeof(struct icmp6_filter);
842 if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
843 return -EFAULT;
844 return 0;
845 default:
846 return -ENOPROTOOPT;
847 };
848
849 return 0;
850 }
851
852 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
853 char __user *optval, int __user *optlen)
854 {
855 int len;
856
857 switch (optname) {
858 case ICMPV6_FILTER:
859 if (get_user(len, optlen))
860 return -EFAULT;
861 if (len < 0)
862 return -EINVAL;
863 if (len > sizeof(struct icmp6_filter))
864 len = sizeof(struct icmp6_filter);
865 if (put_user(len, optlen))
866 return -EFAULT;
867 if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
868 return -EFAULT;
869 return 0;
870 default:
871 return -ENOPROTOOPT;
872 };
873
874 return 0;
875 }
876
877
/*
 * setsockopt() for raw IPv6 sockets.  SOL_RAW (and, for backward
 * compatibility, SOL_IPV6's IPV6_CHECKSUM) is handled here;
 * SOL_ICMPV6 installs the ICMPv6 type filter; everything else is
 * delegated to the generic IPv6 code.
 */
static int rawv6_setsockopt(struct sock *sk, int level, int  optname,
			    char __user *optval, int optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val;

	switch(level) {
		case SOL_RAW:
			break;

		case SOL_ICMPV6:
			if (inet_sk(sk)->num != IPPROTO_ICMPV6)
				return -EOPNOTSUPP;
			return rawv6_seticmpfilter(sk, level, optname, optval,
						   optlen);
		case SOL_IPV6:
			if (optname == IPV6_CHECKSUM)
				break;
			/* deliberate fallthrough for other SOL_IPV6 options */
		default:
			return ipv6_setsockopt(sk, level, optname, optval,
					       optlen);
	};

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
		case IPV6_CHECKSUM:
			/* You may get strange result with a positive odd offset;
			   RFC2292bis agrees with me. */
			if (val > 0 && (val&1))
				return(-EINVAL);
			if (val < 0) {
				/* A negative offset disables checksumming. */
				rp->checksum = 0;
			} else {
				rp->checksum = 1;
				rp->offset = val;
			}

			return 0;
			break;

		default:
			return(-ENOPROTOOPT);
	}
}
924
/*
 * getsockopt() counterpart of rawv6_setsockopt(): IPV6_CHECKSUM
 * reports the configured checksum offset (-1 when disabled); other
 * levels/options are routed to the ICMPv6 filter or generic IPv6
 * handlers.
 */
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val, len;

	switch(level) {
		case SOL_RAW:
			break;

		case SOL_ICMPV6:
			if (inet_sk(sk)->num != IPPROTO_ICMPV6)
				return -EOPNOTSUPP;
			return rawv6_geticmpfilter(sk, level, optname, optval,
						   optlen);
		case SOL_IPV6:
			if (optname == IPV6_CHECKSUM)
				break;
			/* deliberate fallthrough for other SOL_IPV6 options */
		default:
			return ipv6_getsockopt(sk, level, optname, optval,
					       optlen);
	};

	if (get_user(len,optlen))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		/* -1 means checksumming disabled. */
		if (rp->checksum == 0)
			val = -1;
		else
			val = rp->offset;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval,&val,len))
		return -EFAULT;
	return 0;
}
971
972 static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
973 {
974 switch(cmd) {
975 case SIOCOUTQ:
976 {
977 int amount = atomic_read(&sk->sk_wmem_alloc);
978 return put_user(amount, (int __user *)arg);
979 }
980 case SIOCINQ:
981 {
982 struct sk_buff *skb;
983 int amount = 0;
984
985 spin_lock_bh(&sk->sk_receive_queue.lock);
986 skb = skb_peek(&sk->sk_receive_queue);
987 if (skb != NULL)
988 amount = skb->tail - skb->h.raw;
989 spin_unlock_bh(&sk->sk_receive_queue.lock);
990 return put_user(amount, (int __user *)arg);
991 }
992
993 default:
994 return -ENOIOCTLCMD;
995 }
996 }
997
/*
 * close() for raw IPv6 sockets.  IPPROTO_RAW sockets may have router
 * alert interest registered; drop it before the common teardown.
 */
static void rawv6_close(struct sock *sk, long timeout)
{
	if (inet_sk(sk)->num == IPPROTO_RAW)
		ip6_ra_control(sk, -1, NULL);

	sk_common_release(sk);
}
1005
1006 static int rawv6_init_sk(struct sock *sk)
1007 {
1008 if (inet_sk(sk)->num == IPPROTO_ICMPV6) {
1009 struct raw6_sock *rp = raw6_sk(sk);
1010 rp->checksum = 1;
1011 rp->offset = 2;
1012 }
1013 return(0);
1014 }
1015
/* Protocol operations for AF_INET6/SOCK_RAW sockets. */
struct proto rawv6_prot = {
	.name =		"RAWv6",
	.owner =	THIS_MODULE,
	.close =	rawv6_close,
	.connect =	ip6_datagram_connect,
	.disconnect =	udp_disconnect,	/* datagram-style disconnect is shared with UDP */
	.ioctl =	rawv6_ioctl,
	.init =		rawv6_init_sk,
	.destroy =	inet6_destroy_sock,
	.setsockopt =	rawv6_setsockopt,
	.getsockopt =	rawv6_getsockopt,
	.sendmsg =	rawv6_sendmsg,
	.recvmsg =	rawv6_recvmsg,
	.bind =		rawv6_bind,
	.backlog_rcv =	rawv6_rcv_skb,
	.hash =		raw_v6_hash,
	.unhash =	raw_v6_unhash,
	.obj_size =	sizeof(struct raw6_sock),
};
1035
1036 #ifdef CONFIG_PROC_FS
/* /proc/net/raw6 iterator state: the hash bucket being walked. */
struct raw6_iter_state {
	int bucket;
};

/* Fetch the iterator state stashed in the seq_file private pointer. */
#define raw6_seq_private(seq) ((struct raw6_iter_state *)(seq)->private)
1042
/*
 * Find the first IPv6 raw socket across all hash buckets, updating
 * state->bucket as we go.  Returns NULL when the table holds none.
 * Caller must hold raw_v6_lock (taken in raw6_seq_start()).
 */
static struct sock *raw6_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct hlist_node *node;
	struct raw6_iter_state* state = raw6_seq_private(seq);

	for (state->bucket = 0; state->bucket < RAWV6_HTABLE_SIZE; ++state->bucket)
		sk_for_each(sk, node, &raw_v6_htable[state->bucket])
			/* The table also holds IPv4 raw sockets; skip them. */
			if (sk->sk_family == PF_INET6)
				goto out;
	sk = NULL;
out:
	return sk;
}
1057
/*
 * Advance to the next IPv6 raw socket after @sk, moving on to the
 * next hash bucket when the current chain is exhausted.
 *
 * Note the goto jumps back INTO the do/while body: a new bucket head
 * must be re-checked by the loop condition (it may be IPv4 or NULL)
 * without calling sk_next() on it first.
 */
static struct sock *raw6_get_next(struct seq_file *seq, struct sock *sk)
{
	struct raw6_iter_state* state = raw6_seq_private(seq);

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != PF_INET6);

	if (!sk && ++state->bucket < RAWV6_HTABLE_SIZE) {
		sk = sk_head(&raw_v6_htable[state->bucket]);
		goto try_again;
	}
	return sk;
}
1074
1075 static struct sock *raw6_get_idx(struct seq_file *seq, loff_t pos)
1076 {
1077 struct sock *sk = raw6_get_first(seq);
1078 if (sk)
1079 while (pos && (sk = raw6_get_next(seq, sk)) != NULL)
1080 --pos;
1081 return pos ? NULL : sk;
1082 }
1083
/*
 * seq_file start: take the table lock for the whole iteration
 * (released in raw6_seq_stop()) and seek to the requested position.
 */
static void *raw6_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&raw_v6_lock);
	return *pos ? raw6_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
1089
1090 static void *raw6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1091 {
1092 struct sock *sk;
1093
1094 if (v == SEQ_START_TOKEN)
1095 sk = raw6_get_first(seq);
1096 else
1097 sk = raw6_get_next(seq, v);
1098 ++*pos;
1099 return sk;
1100 }
1101
/* seq_file stop: drop the lock taken in raw6_seq_start(). */
static void raw6_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&raw_v6_lock);
}
1106
/*
 * Print one socket line for /proc/net/raw6, in the same column layout
 * as the TCP/UDP proc files.  Raw sockets have no ports, so destp is
 * always 0 and "srcp" is the bound protocol number; the timer/retrans
 * columns are printed as zeros.
 */
static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
	struct ipv6_pinfo *np = inet6_sk(sp);
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = 0;
	srcp  = inet_sk(sp)->num;
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   atomic_read(&sp->sk_wmem_alloc),
		   atomic_read(&sp->sk_rmem_alloc),
		   0, 0L, 0,
		   sock_i_uid(sp), 0,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp);
}
1133
/*
 * seq_file show: print the column header for the start token,
 * otherwise one line per socket via raw6_sock_seq_show().
 */
static int raw6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   " sl "
			   "local_address "
			   "remote_address "
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   " uid timeout inode\n");
	else
		raw6_sock_seq_show(seq, v, raw6_seq_private(seq)->bucket);
	return 0;
}
1147
/* seq_file iterator callbacks for /proc/net/raw6. */
static struct seq_operations raw6_seq_ops = {
	.start =	raw6_seq_start,
	.next =		raw6_seq_next,
	.stop =		raw6_seq_stop,
	.show =		raw6_seq_show,
};
1154
1155 static int raw6_seq_open(struct inode *inode, struct file *file)
1156 {
1157 struct seq_file *seq;
1158 int rc = -ENOMEM;
1159 struct raw6_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
1160 if (!s)
1161 goto out;
1162 rc = seq_open(file, &raw6_seq_ops);
1163 if (rc)
1164 goto out_kfree;
1165 seq = file->private_data;
1166 seq->private = s;
1167 memset(s, 0, sizeof(*s));
1168 out:
1169 return rc;
1170 out_kfree:
1171 kfree(s);
1172 goto out;
1173 }
1174
/* File operations for /proc/net/raw6 (iterator state freed on release). */
static struct file_operations raw6_seq_fops = {
	.owner =	THIS_MODULE,
	.open =		raw6_seq_open,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	seq_release_private,
};
1182
1183 int __init raw6_proc_init(void)
1184 {
1185 if (!proc_net_fops_create("raw6", S_IRUGO, &raw6_seq_fops))
1186 return -ENOMEM;
1187 return 0;
1188 }
1189
/* Unregister /proc/net/raw6. */
void raw6_proc_exit(void)
{
	proc_net_remove("raw6");
}
1194 #endif /* CONFIG_PROC_FS */