/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP to API glue.
 *
 * Authors:	see ip.c
 *
 * Fixes:
 *		Many		:	Split from ip.c , see ip.c for history.
 *		Martin Mares	:	TOS setting fixed.
 *		Alan Cox	:	Fixed a couple of oopses in Martin's
 *					TOS tweaks.
 *		Mike McLagan	:	Routing by source
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/ip_fib.h>

#include <linux/errqueue.h>
#include <asm/uaccess.h>

#define IP_CMSG_PKTINFO		1
#define IP_CMSG_TTL		2
#define IP_CMSG_TOS		4
#define IP_CMSG_RECVOPTS	8
#define IP_CMSG_RETOPTS		16
#define IP_CMSG_PASSSEC		32
#define IP_CMSG_ORIGDSTADDR	64

/*
 *	SOL_IP control messages.
 */
#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb))

static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct in_pktinfo info = *PKTINFO_SKB_CB(skb);

	info.ipi_addr.s_addr = ip_hdr(skb)->daddr;

	put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}

static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
	int ttl = ip_hdr(skb)->ttl;
	put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}

static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
	put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}

static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
	if (IPCB(skb)->opt.optlen == 0)
		return;

	put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
		 ip_hdr(skb) + 1);
}


static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;

	if (IPCB(skb)->opt.optlen == 0)
		return;

	if (ip_options_echo(opt, skb)) {
		msg->msg_flags |= MSG_CTRUNC;
		return;
	}
	ip_options_undo(opt);

	put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}

static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
	char *secdata;
	u32 seclen, secid;
	int err;

	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
	if (err)
		return;

	err = security_secid_to_secctx(secid, &secdata, &seclen);
	if (err)
		return;

	put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
	security_release_secctx(secdata, seclen);
}

static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	struct sockaddr_in sin;
	const struct iphdr *iph = ip_hdr(skb);
	__be16 *ports = (__be16 *)skb_transport_header(skb);

	if (skb_transport_offset(skb) + 4 > skb->len)
		return;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = iph->daddr;
	sin.sin_port = ports[1];
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}

void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(skb->sk);
	unsigned int flags = inet->cmsg_flags;

	/* Ordered by supposed usage frequency */
	if (flags & 1)
		ip_cmsg_recv_pktinfo(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_ttl(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_tos(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_opts(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_retopts(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_security(msg, skb);

	if ((flags >>= 1) == 0)
		return;
	if (flags & 1)
		ip_cmsg_recv_dstaddr(msg, skb);

}
EXPORT_SYMBOL(ip_cmsg_recv);
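
/*
 * Example (userspace sketch, not part of this kernel file): how the
 * IP_PKTINFO control-message path above is typically consumed.  Function
 * and buffer names are illustrative, setup and error handling are trimmed,
 * and the block is guarded with "#if 0" so it has no effect on the build.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>		/* SOL_IP, IP_PKTINFO, struct in_pktinfo */

static void recv_with_pktinfo(int fd)
{
	char data[1500];
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	/* This sets IP_CMSG_PKTINFO in inet->cmsg_flags (see do_ip_setsockopt). */
	setsockopt(fd, SOL_IP, IP_PKTINFO, &on, sizeof(on));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	/* Walk the ancillary data emitted by ip_cmsg_recv_pktinfo(). */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_IP && cmsg->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo info;

			memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
			printf("received on ifindex %d\n", info.ipi_ifindex);
		}
	}
}
#endif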

int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
{
	int err;
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_IP)
			continue;
		switch (cmsg->cmsg_type) {
		case IP_RETOPTS:
			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
					     err < 40 ? err : 40);
			if (err)
				return err;
			break;
		case IP_PKTINFO:
		{
			struct in_pktinfo *info;
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
				return -EINVAL;
			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
			ipc->oif = info->ipi_ifindex;
			ipc->addr = info->ipi_spec_dst.s_addr;
			break;
		}
		default:
			return -EINVAL;
		}
	}
	return 0;
}


/* Special input handler for packets caught by the router alert option.
   They are selected only by protocol field and then processed like
   local ones, but only if someone wants them! Otherwise, a router
   that is not running rsvpd would kill RSVP.

   What user level does with them is a user level problem.
   I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough, e.g., to forward mtrace requests
   sent to a multicast group so they reach the destination's designated router.
 */
struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);


static void ip_ra_destroy_rcu(struct rcu_head *head)
{
	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);

	sock_put(ra->saved_sk);
	kfree(ra);
}

int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *))
{
	struct ip_ra_chain *ra, *new_ra;
	struct ip_ra_chain __rcu **rap;

	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
		return -EINVAL;

	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;

	spin_lock_bh(&ip_ra_lock);
	for (rap = &ip_ra_chain;
	     (ra = rcu_dereference_protected(*rap,
			lockdep_is_held(&ip_ra_lock))) != NULL;
	     rap = &ra->next) {
		if (ra->sk == sk) {
			if (on) {
				spin_unlock_bh(&ip_ra_lock);
				kfree(new_ra);
				return -EADDRINUSE;
			}
			/* don't let ip_call_ra_chain() use sk again */
			ra->sk = NULL;
			rcu_assign_pointer(*rap, ra->next);
			spin_unlock_bh(&ip_ra_lock);

			if (ra->destructor)
				ra->destructor(sk);
			/*
			 * Delay sock_put(sk) and kfree(ra) until after one RCU
			 * grace period. This guarantees ip_call_ra_chain()
			 * doesn't need to mess with socket refcounts.
			 */
			ra->saved_sk = sk;
			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
			return 0;
		}
	}
	if (new_ra == NULL) {
		spin_unlock_bh(&ip_ra_lock);
		return -ENOBUFS;
	}
	new_ra->sk = sk;
	new_ra->destructor = destructor;

	new_ra->next = ra;
	rcu_assign_pointer(*rap, new_ra);
	sock_hold(sk);
	spin_unlock_bh(&ip_ra_lock);

	return 0;
}
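
/*
 * Example (userspace sketch, not part of this kernel file): the setsockopt()
 * call that ends up in ip_ra_control() above.  The protocol choice and the
 * function name are illustrative only; raw sockets need CAP_NET_RAW, and
 * IPPROTO_RAW itself is rejected with -EINVAL by ip_ra_control().
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>		/* SOL_IP, IP_ROUTER_ALERT, IPPROTO_RSVP */

static int enable_router_alert(void)
{
	int on = 1;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

	if (fd < 0)
		return -1;
	/* Ask to be handed packets carrying the IP Router Alert option. */
	if (setsockopt(fd, SOL_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0)
		return -1;
	return fd;
}
#endif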

void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
		   __be16 port, u32 info, u8 *payload)
{
	struct sock_exterr_skb *serr;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
	serr->ee.ee_type = icmp_hdr(skb)->type;
	serr->ee.ee_code = icmp_hdr(skb)->code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
			    skb_network_header(skb);
	serr->port = port;

	if (skb_pull(skb, payload - skb->data) != NULL) {
		skb_reset_transport_header(skb);
		if (sock_queue_err_skb(sk, skb) == 0)
			return;
	}
	kfree_skb(skb);
}

void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;
	struct iphdr *iph;
	struct sk_buff *skb;

	if (!inet->recverr)
		return;

	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->daddr = daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = port;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}

/*
 *	Handle MSG_ERRQUEUE
 */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	struct sockaddr_in *sin;
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in	 offender;
	} errhdr;
	int err;
	int copied;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	sin = (struct sockaddr_in *)msg->msg_name;
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
						   serr->addr_offset);
		sin->sin_port = serr->port;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	sin = &errhdr.offender;
	sin->sin_family = AF_UNSPEC;
	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
		struct inet_sock *inet = inet_sk(sk);

		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sin->sin_port = 0;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		if (inet->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	}

	put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	skb2 = skb_peek(&sk->sk_error_queue);
	if (skb2 != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
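
/*
 * Example (userspace sketch, not part of this kernel file): the consumer
 * side of the error queue filled by ip_icmp_error()/ip_local_error() and
 * drained by ip_recv_error() above.  Names and buffer sizes are
 * illustrative; IP_RECVERR would normally be enabled right after socket
 * creation so that later errors are queued.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>		/* SOL_IP, IP_RECVERR */
#include <linux/errqueue.h>	/* struct sock_extended_err */

static int read_ip_error(int fd)
{
	char data[256], cbuf[512];
	struct sockaddr_in from;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_name = &from, .msg_namelen = sizeof(from),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return 0;	/* error queue empty */

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_IP && cmsg->cmsg_type == IP_RECVERR) {
			struct sock_extended_err ee;

			memcpy(&ee, CMSG_DATA(cmsg), sizeof(ee));
			return ee.ee_errno;	/* e.g. EHOSTUNREACH */
		}
	}
	return 0;
}
#endif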


/*
 *	Socket option code for IP. This is the end of the line after any
 *	TCP,UDP etc options on an IP socket.
 */

static int do_ip_setsockopt(struct sock *sk, int level,
			    int optname, char __user *optval, unsigned int optlen)
{
	struct inet_sock *inet = inet_sk(sk);
	int val = 0, err;

	switch (optname) {
	case IP_PKTINFO:
	case IP_RECVTTL:
	case IP_RECVOPTS:
	case IP_RECVTOS:
	case IP_RETOPTS:
	case IP_TOS:
	case IP_TTL:
	case IP_HDRINCL:
	case IP_MTU_DISCOVER:
	case IP_RECVERR:
	case IP_ROUTER_ALERT:
	case IP_FREEBIND:
	case IP_PASSSEC:
	case IP_TRANSPARENT:
	case IP_MINTTL:
	case IP_NODEFRAG:
	case IP_UNICAST_IF:
	case IP_MULTICAST_TTL:
	case IP_MULTICAST_ALL:
	case IP_MULTICAST_LOOP:
	case IP_RECVORIGDSTADDR:
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else if (optlen >= sizeof(char)) {
			unsigned char ucval;

			if (get_user(ucval, (unsigned char __user *) optval))
				return -EFAULT;
			val = (int) ucval;
		}
	}

	/* If optlen==0, it is equivalent to val == 0 */

	if (ip_mroute_opt(optname))
		return ip_mroute_setsockopt(sk, optname, optval, optlen);

	err = 0;
	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		struct ip_options_rcu *old, *opt = NULL;

		if (optlen > 40)
			goto e_inval;
		err = ip_options_get_from_user(sock_net(sk), &opt,
					       optval, optlen);
		if (err)
			break;
		old = rcu_dereference_protected(inet->inet_opt,
						sock_owned_by_user(sk));
		if (inet->is_icsk) {
			struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == PF_INET ||
			    (!((1 << sk->sk_state) &
			       (TCPF_LISTEN | TCPF_CLOSE)) &&
			     inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
				if (old)
					icsk->icsk_ext_hdr_len -= old->opt.optlen;
				if (opt)
					icsk->icsk_ext_hdr_len += opt->opt.optlen;
				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if IS_ENABLED(CONFIG_IPV6)
			}
#endif
		}
		rcu_assign_pointer(inet->inet_opt, opt);
		if (old)
			kfree_rcu(old, rcu);
		break;
	}
	case IP_PKTINFO:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PKTINFO;
		else
			inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
		break;
	case IP_RECVTTL:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TTL;
		else
			inet->cmsg_flags &= ~IP_CMSG_TTL;
		break;
	case IP_RECVTOS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TOS;
		else
			inet->cmsg_flags &= ~IP_CMSG_TOS;
		break;
	case IP_RECVOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RECVOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
		break;
	case IP_RETOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RETOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
		break;
	case IP_PASSSEC:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PASSSEC;
		else
			inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
		break;
	case IP_RECVORIGDSTADDR:
		if (val)
			inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
		else
			inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
		break;
	case IP_TOS:	/* This sets both TOS and Precedence */
		if (sk->sk_type == SOCK_STREAM) {
			val &= ~INET_ECN_MASK;
			val |= inet->tos & INET_ECN_MASK;
		}
		if (inet->tos != val) {
			inet->tos = val;
			sk->sk_priority = rt_tos2priority(val);
			sk_dst_reset(sk);
		}
		break;
	case IP_TTL:
		if (optlen < 1)
			goto e_inval;
		if (val != -1 && (val < 1 || val > 255))
			goto e_inval;
		inet->uc_ttl = val;
		break;
	case IP_HDRINCL:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->hdrincl = val ? 1 : 0;
		break;
	case IP_NODEFRAG:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->nodefrag = val ? 1 : 0;
		break;
	case IP_MTU_DISCOVER:
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
			goto e_inval;
		inet->pmtudisc = val;
		break;
	case IP_RECVERR:
		inet->recverr = !!val;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		break;
	case IP_MULTICAST_TTL:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (optlen < 1)
			goto e_inval;
		if (val == -1)
			val = 1;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->mc_ttl = val;
		break;
	case IP_MULTICAST_LOOP:
		if (optlen < 1)
			goto e_inval;
		inet->mc_loop = !!val;
		break;
	case IP_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			inet->uc_index = 0;
			err = 0;
			break;
		}

		dev = dev_get_by_index(sock_net(sk), ifindex);
		err = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if)
			break;

		inet->uc_index = ifindex;
		err = 0;
		break;
	}
	case IP_MULTICAST_IF:
	{
		struct ip_mreqn mreq;
		struct net_device *dev = NULL;

		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		/*
		 *	Check the arguments are allowable
		 */

		if (optlen < sizeof(struct in_addr))
			goto e_inval;

		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (optlen >= sizeof(struct ip_mreq)) {
				if (copy_from_user(&mreq, optval,
						   sizeof(struct ip_mreq)))
					break;
			} else if (optlen >= sizeof(struct in_addr)) {
				if (copy_from_user(&mreq.imr_address, optval,
						   sizeof(struct in_addr)))
					break;
			}
		}

		if (!mreq.imr_ifindex) {
			if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
				inet->mc_index = 0;
				inet->mc_addr = 0;
				err = 0;
				break;
			}
			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
			if (dev)
				mreq.imr_ifindex = dev->ifindex;
		} else
			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);


		err = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if &&
		    mreq.imr_ifindex != sk->sk_bound_dev_if)
			break;

		inet->mc_index = mreq.imr_ifindex;
		inet->mc_addr = mreq.imr_address.s_addr;
		err = 0;
		break;
	}

	case IP_ADD_MEMBERSHIP:
	case IP_DROP_MEMBERSHIP:
	{
		struct ip_mreqn mreq;

		err = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		if (optlen < sizeof(struct ip_mreq))
			goto e_inval;
		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
				break;
		}

		if (optname == IP_ADD_MEMBERSHIP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter *msf;

		if (optlen < IP_MSFILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		msf = kmalloc(optlen, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(msf, optval, optlen)) {
			kfree(msf);
			break;
		}
		/* numsrc >= (1G-4) overflow in 32 bits */
		if (msf->imsf_numsrc >= 0x3ffffffcU ||
		    msf->imsf_numsrc > sysctl_igmp_max_msf) {
			kfree(msf);
			err = -ENOBUFS;
			break;
		}
		if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
			kfree(msf);
			err = -EINVAL;
			break;
		}
		err = ip_mc_msfilter(sk, msf, 0);
		kfree(msf);
		break;
	}
	case IP_BLOCK_SOURCE:
	case IP_UNBLOCK_SOURCE:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	{
		struct ip_mreq_source mreqs;
		int omode, add;

		if (optlen != sizeof(struct ip_mreq_source))
			goto e_inval;
		if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
			err = -EFAULT;
			break;
		}
		if (optname == IP_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == IP_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
			struct ip_mreqn mreq;

			mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
			mreq.imr_address.s_addr = mreqs.imr_interface;
			mreq.imr_ifindex = 0;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs, 0);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in *psin;
		struct ip_mreqn mreq;

		if (optlen < sizeof(struct group_req))
			goto e_inval;
		err = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(greq)))
			break;
		psin = (struct sockaddr_in *)&greq.gr_group;
		if (psin->sin_family != AF_INET)
			goto e_inval;
		memset(&mreq, 0, sizeof(mreq));
		mreq.imr_multiaddr = psin->sin_addr;
		mreq.imr_ifindex = greq.gr_interface;

		if (optname == MCAST_JOIN_GROUP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		struct ip_mreq_source mreqs;
		struct sockaddr_in *psin;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			err = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET ||
		    greqs.gsr_source.ss_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			break;
		}
		psin = (struct sockaddr_in *)&greqs.gsr_group;
		mreqs.imr_multiaddr = psin->sin_addr.s_addr;
		psin = (struct sockaddr_in *)&greqs.gsr_source;
		mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
		mreqs.imr_interface = 0; /* use index for mc_source */

		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct ip_mreqn mreq;

			psin = (struct sockaddr_in *)&greqs.gsr_group;
			mreq.imr_multiaddr = psin->sin_addr;
			mreq.imr_address.s_addr = 0;
			mreq.imr_ifindex = greqs.gsr_interface;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			greqs.gsr_interface = mreq.imr_ifindex;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs,
				   greqs.gsr_interface);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct sockaddr_in *psin;
		struct ip_msfilter *msf = NULL;
		struct group_filter *gsf = NULL;
		int msize, i, ifindex;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen, GFP_KERNEL);
		if (!gsf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(gsf, optval, optlen))
			goto mc_msf_out;

		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffff ||
		    gsf->gf_numsrc > sysctl_igmp_max_msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			err = -EINVAL;
			goto mc_msf_out;
		}
		msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
		msf = kmalloc(msize, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		ifindex = gsf->gf_interface;
		psin = (struct sockaddr_in *)&gsf->gf_group;
		if (psin->sin_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			goto mc_msf_out;
		}
		msf->imsf_multiaddr = psin->sin_addr.s_addr;
		msf->imsf_interface = 0;
		msf->imsf_fmode = gsf->gf_fmode;
		msf->imsf_numsrc = gsf->gf_numsrc;
		err = -EADDRNOTAVAIL;
		for (i = 0; i < gsf->gf_numsrc; ++i) {
			psin = (struct sockaddr_in *)&gsf->gf_slist[i];

			if (psin->sin_family != AF_INET)
				goto mc_msf_out;
			msf->imsf_slist[i] = psin->sin_addr.s_addr;
		}
		kfree(gsf);
		gsf = NULL;

		err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
		kfree(msf);
		kfree(gsf);
		break;
	}
	case IP_MULTICAST_ALL:
		if (optlen < 1)
			goto e_inval;
		if (val != 0 && val != 1)
			goto e_inval;
		inet->mc_all = val;
		break;
	case IP_ROUTER_ALERT:
		err = ip_ra_control(sk, val ? 1 : 0, NULL);
		break;

	case IP_FREEBIND:
		if (optlen < 1)
			goto e_inval;
		inet->freebind = !!val;
		break;

	case IP_IPSEC_POLICY:
	case IP_XFRM_POLICY:
		err = -EPERM;
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			break;
		err = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IP_TRANSPARENT:
		if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (optlen < 1)
			goto e_inval;
		inet->transparent = !!val;
		break;

	case IP_MINTTL:
		if (optlen < 1)
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->min_ttl = val;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return err;

e_inval:
	release_sock(sk);
	return -EINVAL;
}

/**
 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
 * @skb: buffer
 *
 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
 * destination in skb->cb[] before the dst is dropped.
 * This way, the receiver doesn't take cache line misses to read the rtable.
 */
void ipv4_pktinfo_prepare(struct sk_buff *skb)
{
	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);

	if (skb_rtable(skb)) {
		pktinfo->ipi_ifindex = inet_iif(skb);
		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
	} else {
		pktinfo->ipi_ifindex = 0;
		pktinfo->ipi_spec_dst.s_addr = 0;
	}
	/* We need to keep the dst for __ip_options_echo().
	 * We could restrict the test to opt.ts_needtime || opt.srr,
	 * but the following is good enough as IP options are not often used.
	 */
	if (unlikely(IPCB(skb)->opt.optlen))
		skb_dst_force(skb);
	else
		skb_dst_drop(skb);
}

int ip_setsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY &&
	    !ip_mroute_opt(optname)) {
		lock_sock(sk);
		err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_setsockopt);
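
/*
 * Example (userspace sketch, not part of this kernel file): driving two of
 * the simple cases in do_ip_setsockopt() above.  The function name and the
 * chosen values are illustrative.  For the options listed at the top of
 * do_ip_setsockopt() the kernel accepts either an int or a single byte, as
 * the optlen checks there show.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>		/* SOL_IP, IP_TOS, IP_TTL */
#include <netinet/ip.h>		/* IPTOS_LOWDELAY */

static int tune_socket(int fd)
{
	int tos = IPTOS_LOWDELAY;	/* ECN bits are masked for SOCK_STREAM */
	int ttl = 64;			/* 1..255, or -1 to restore the default */

	if (setsockopt(fd, SOL_IP, IP_TOS, &tos, sizeof(tos)) < 0)
		return -1;
	if (setsockopt(fd, SOL_IP, IP_TTL, &ttl, sizeof(ttl)) < 0)
		return -1;
	return 0;
}
#endif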

#ifdef CONFIG_COMPAT
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
			ip_setsockopt);

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY &&
	    !ip_mroute_opt(optname)) {
		lock_sock(sk);
		err = compat_nf_setsockopt(sk, PF_INET, optname,
					   optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
#endif

/*
 *	Get the options. Note for future reference. The GET of IP options gets
 *	the _received_ ones. The set sets the _sent_ ones.
 */

static int do_ip_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int val;
	int len;

	if (level != SOL_IP)
		return -EOPNOTSUPP;

	if (ip_mroute_opt(optname))
		return ip_mroute_getsockopt(sk, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		unsigned char optbuf[sizeof(struct ip_options)+40];
		struct ip_options *opt = (struct ip_options *)optbuf;
		struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference_protected(inet->inet_opt,
						     sock_owned_by_user(sk));
		opt->optlen = 0;
		if (inet_opt)
			memcpy(optbuf, &inet_opt->opt,
			       sizeof(struct ip_options) +
			       inet_opt->opt.optlen);
		release_sock(sk);

		if (opt->optlen == 0)
			return put_user(0, optlen);

		ip_options_undo(opt);

		len = min_t(unsigned int, len, opt->optlen);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, opt->__data, len))
			return -EFAULT;
		return 0;
	}
	case IP_PKTINFO:
		val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
		break;
	case IP_RECVTTL:
		val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
		break;
	case IP_RECVTOS:
		val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
		break;
	case IP_RECVOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
		break;
	case IP_RETOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
		break;
	case IP_PASSSEC:
		val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
		break;
	case IP_RECVORIGDSTADDR:
		val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
		break;
	case IP_TOS:
		val = inet->tos;
		break;
	case IP_TTL:
		val = (inet->uc_ttl == -1 ?
		       sysctl_ip_default_ttl :
		       inet->uc_ttl);
		break;
	case IP_HDRINCL:
		val = inet->hdrincl;
		break;
	case IP_NODEFRAG:
		val = inet->nodefrag;
		break;
	case IP_MTU_DISCOVER:
		val = inet->pmtudisc;
		break;
	case IP_MTU:
	{
		struct dst_entry *dst;
		val = 0;
		dst = sk_dst_get(sk);
		if (dst) {
			val = dst_mtu(dst);
			dst_release(dst);
		}
		if (!val) {
			release_sock(sk);
			return -ENOTCONN;
		}
		break;
	}
	case IP_RECVERR:
		val = inet->recverr;
		break;
	case IP_MULTICAST_TTL:
		val = inet->mc_ttl;
		break;
	case IP_MULTICAST_LOOP:
		val = inet->mc_loop;
		break;
	case IP_UNICAST_IF:
		val = (__force int)htonl((__u32) inet->uc_index);
		break;
	case IP_MULTICAST_IF:
	{
		struct in_addr addr;
		len = min_t(unsigned int, len, sizeof(struct in_addr));
		addr.s_addr = inet->mc_addr;
		release_sock(sk);

		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &addr, len))
			return -EFAULT;
		return 0;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter msf;
		int err;

		if (len < IP_MSFILTER_SIZE(0)) {
			release_sock(sk);
			return -EINVAL;
		}
		if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
			release_sock(sk);
			return -EFAULT;
		}
		err = ip_mc_msfget(sk, &msf,
				   (struct ip_msfilter __user *)optval, optlen);
		release_sock(sk);
		return err;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter gsf;
		int err;

		if (len < GROUP_FILTER_SIZE(0)) {
			release_sock(sk);
			return -EINVAL;
		}
		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
			release_sock(sk);
			return -EFAULT;
		}
		err = ip_mc_gsfget(sk, &gsf,
				   (struct group_filter __user *)optval,
				   optlen);
		release_sock(sk);
		return err;
	}
	case IP_MULTICAST_ALL:
		val = inet->mc_all;
		break;
	case IP_PKTOPTIONS:
	{
		struct msghdr msg;

		release_sock(sk);

		if (sk->sk_type != SOCK_STREAM)
			return -ENOPROTOOPT;

		msg.msg_control = optval;
		msg.msg_controllen = len;
		msg.msg_flags = flags;

		if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
			struct in_pktinfo info;

			info.ipi_addr.s_addr = inet->inet_rcv_saddr;
			info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
			info.ipi_ifindex = inet->mc_index;
			put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
		}
		if (inet->cmsg_flags & IP_CMSG_TTL) {
			int hlim = inet->mc_ttl;
			put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
		}
		if (inet->cmsg_flags & IP_CMSG_TOS) {
			int tos = inet->rcv_tos;
			put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
		}
		len -= msg.msg_controllen;
		return put_user(len, optlen);
	}
	case IP_FREEBIND:
		val = inet->freebind;
		break;
	case IP_TRANSPARENT:
		val = inet->transparent;
		break;
	case IP_MINTTL:
		val = inet->min_ttl;
		break;
	default:
		release_sock(sk);
		return -ENOPROTOOPT;
	}
	release_sock(sk);

	if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
		unsigned char ucval = (unsigned char)val;
		len = 1;
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &ucval, 1))
			return -EFAULT;
	} else {
		len = min_t(unsigned int, sizeof(int), len);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &val, len))
			return -EFAULT;
	}
	return 0;
}

int ip_getsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, int __user *optlen)
{
	int err;

	err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		err = nf_getsockopt(sk, PF_INET, optname, optval,
				    &len);
		release_sock(sk);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_getsockopt);
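
/*
 * Example (userspace sketch, not part of this kernel file): querying the
 * path MTU cached in the socket's dst entry, matching the IP_MTU case in
 * do_ip_getsockopt() above.  The function name is illustrative; the socket
 * must be connected, otherwise the kernel returns -ENOTCONN.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>		/* SOL_IP, IP_MTU */

static int query_path_mtu(int fd)
{
	int mtu = 0;
	socklen_t len = sizeof(mtu);

	if (getsockopt(fd, SOL_IP, IP_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;
}
#endif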

#ifdef CONFIG_COMPAT
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen)
{
	int err;

	if (optname == MCAST_MSFILTER)
		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
			ip_getsockopt);

	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
			       MSG_CMSG_COMPAT);

#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
		release_sock(sk);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
#endif