net/core/sock.c (from android_kernel_alcatel_ttab.git, mt8127)
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
8 *
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
14 *
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
35 * code. The ACK stuff can wait and needs major
36 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
82 *
83 * To Fix:
84 *
85 *
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
90 */
91
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/types.h>
97 #include <linux/socket.h>
98 #include <linux/in.h>
99 #include <linux/kernel.h>
100 #include <linux/module.h>
101 #include <linux/proc_fs.h>
102 #include <linux/seq_file.h>
103 #include <linux/sched.h>
104 #include <linux/timer.h>
105 #include <linux/string.h>
106 #include <linux/sockios.h>
107 #include <linux/net.h>
108 #include <linux/mm.h>
109 #include <linux/slab.h>
110 #include <linux/interrupt.h>
111 #include <linux/poll.h>
112 #include <linux/tcp.h>
113 #include <linux/init.h>
114 #include <linux/highmem.h>
115 #include <linux/user_namespace.h>
116 #include <linux/static_key.h>
117 #include <linux/memcontrol.h>
118 #include <linux/prefetch.h>
119
120 #include <asm/uaccess.h>
121
122 #include <linux/netdevice.h>
123 #include <net/protocol.h>
124 #include <linux/skbuff.h>
125 #include <net/net_namespace.h>
126 #include <net/request_sock.h>
127 #include <net/sock.h>
128 #include <linux/net_tstamp.h>
129 #include <net/xfrm.h>
130 #include <linux/ipsec.h>
131 #include <net/cls_cgroup.h>
132 #include <net/netprio_cgroup.h>
133
134 #include <linux/filter.h>
135
136 #include <trace/events/sock.h>
137
138 #include <net/af_unix.h>
139
140
141 #ifdef CONFIG_INET
142 #include <net/tcp.h>
143 #endif
144 #include <linux/xlog.h>
145
146 static DEFINE_MUTEX(proto_list_mutex);
147 static LIST_HEAD(proto_list);
148
149 /**
150 * sk_ns_capable - General socket capability test
151 * @sk: Socket to use a capability on or through
152 * @user_ns: The user namespace of the capability to use
153 * @cap: The capability to use
154 *
 155  * Test to see if the opener of the socket had the capability @cap when
 156  * the socket was created and if the current process has the capability
 157  * @cap in the user namespace @user_ns.
158 */
159 bool sk_ns_capable(const struct sock *sk,
160 struct user_namespace *user_ns, int cap)
161 {
162 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
163 ns_capable(user_ns, cap);
164 }
165 EXPORT_SYMBOL(sk_ns_capable);
166
167 /**
168 * sk_capable - Socket global capability test
169 * @sk: Socket to use a capability on or through
 170  * @cap: The global capability to use
 171  *
 172  * Test to see if the opener of the socket had the capability @cap when
 173  * the socket was created and if the current process has the capability
 174  * @cap in all user namespaces.
175 */
176 bool sk_capable(const struct sock *sk, int cap)
177 {
178 return sk_ns_capable(sk, &init_user_ns, cap);
179 }
180 EXPORT_SYMBOL(sk_capable);
181
182 /**
183 * sk_net_capable - Network namespace socket capability test
184 * @sk: Socket to use a capability on or through
185 * @cap: The capability to use
186 *
 187  * Test to see if the opener of the socket had the capability @cap when
 188  * the socket was created and if the current process has the capability
 189  * @cap over the network namespace the socket is a member of.
190 */
191 bool sk_net_capable(const struct sock *sk, int cap)
192 {
193 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
194 }
195 EXPORT_SYMBOL(sk_net_capable);
196
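/*
 * Illustrative sketch (not part of this file): a privileged socket option
 * handler might combine the helpers above as below. The logic loosely
 * mirrors the SO_MARK handling later in this file, but "example_set_mark"
 * itself is a hypothetical name.
 *
 *	static int example_set_mark(struct sock *sk, u32 val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		sk->sk_mark = val;
 *		return 0;
 *	}
 */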
197
198 #ifdef CONFIG_MEMCG_KMEM
199 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
200 {
201 struct proto *proto;
202 int ret = 0;
203
204 mutex_lock(&proto_list_mutex);
205 list_for_each_entry(proto, &proto_list, node) {
206 if (proto->init_cgroup) {
207 ret = proto->init_cgroup(memcg, ss);
208 if (ret)
209 goto out;
210 }
211 }
212
213 mutex_unlock(&proto_list_mutex);
214 return ret;
215 out:
216 list_for_each_entry_continue_reverse(proto, &proto_list, node)
217 if (proto->destroy_cgroup)
218 proto->destroy_cgroup(memcg);
219 mutex_unlock(&proto_list_mutex);
220 return ret;
221 }
222
223 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
224 {
225 struct proto *proto;
226
227 mutex_lock(&proto_list_mutex);
228 list_for_each_entry_reverse(proto, &proto_list, node)
229 if (proto->destroy_cgroup)
230 proto->destroy_cgroup(memcg);
231 mutex_unlock(&proto_list_mutex);
232 }
233 #endif
234
235 /*
236 * Each address family might have different locking rules, so we have
237 * one slock key per address family:
238 */
239 static struct lock_class_key af_family_keys[AF_MAX];
240 static struct lock_class_key af_family_slock_keys[AF_MAX];
241
242 #if defined(CONFIG_MEMCG_KMEM)
243 struct static_key memcg_socket_limit_enabled;
244 EXPORT_SYMBOL(memcg_socket_limit_enabled);
245 #endif
246
247 /*
248 * Make lock validator output more readable. (we pre-construct these
 249  * strings at build time, so that runtime initialization of socket
250 * locks is fast):
251 */
252 static const char *const af_family_key_strings[AF_MAX+1] = {
253 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
254 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
255 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
256 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
257 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
258 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
259 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
260 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
261 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
262 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
 263   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
264 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
265 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
266 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
267 };
268 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
269 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
270 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
271 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
272 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
273 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
274 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
275 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
276 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
277 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
278 "slock-27" , "slock-28" , "slock-AF_CAN" ,
279 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
280 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
281 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
282 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
283 };
284 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
285 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
286 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
287 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
288 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
289 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
290 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
291 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
292 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
293 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
294 "clock-27" , "clock-28" , "clock-AF_CAN" ,
295 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
296 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
297 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
298 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
299 };
300
301 /*
302 * sk_callback_lock locking rules are per-address-family,
303 * so split the lock classes by using a per-AF key:
304 */
305 static struct lock_class_key af_callback_keys[AF_MAX];
306
307 /* Take into consideration the size of the struct sk_buff overhead in the
308 * determination of these values, since that is non-constant across
309 * platforms. This makes socket queueing behavior and performance
310 * not depend upon such differences.
311 */
312 #define _SK_MEM_PACKETS 256
313 #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
314 #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
315 #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
316
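/*
 * Rough worked example (sizes vary by architecture and config): if
 * SKB_TRUESIZE(256) comes out to about 768 bytes on a given build, then
 * SK_WMEM_MAX and SK_RMEM_MAX are about 192 kB, and the vendor default of
 * (SK_RMEM_MAX * 8) used for sysctl_rmem_max below is about 1.5 MB.
 */
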
317 /* Run time adjustable parameters. */
318 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
319 EXPORT_SYMBOL(sysctl_wmem_max);
320 __u32 sysctl_rmem_max __read_mostly = (SK_RMEM_MAX*8);
321 EXPORT_SYMBOL(sysctl_rmem_max);
322 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
323 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
324
325 /* Maximal space eaten by iovec or ancillary data plus some space */
326 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
327 EXPORT_SYMBOL(sysctl_optmem_max);
328
329 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
330 EXPORT_SYMBOL_GPL(memalloc_socks);
331
332 /**
333 * sk_set_memalloc - sets %SOCK_MEMALLOC
334 * @sk: socket to set it on
335 *
336 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
337 * It's the responsibility of the admin to adjust min_free_kbytes
338 * to meet the requirements
339 */
340 void sk_set_memalloc(struct sock *sk)
341 {
342 sock_set_flag(sk, SOCK_MEMALLOC);
343 sk->sk_allocation |= __GFP_MEMALLOC;
344 static_key_slow_inc(&memalloc_socks);
345 }
346 EXPORT_SYMBOL_GPL(sk_set_memalloc);
347
348 void sk_clear_memalloc(struct sock *sk)
349 {
350 sock_reset_flag(sk, SOCK_MEMALLOC);
351 sk->sk_allocation &= ~__GFP_MEMALLOC;
352 static_key_slow_dec(&memalloc_socks);
353
354 /*
355 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
356 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
357 * it has rmem allocations there is a risk that the user of the
358 * socket cannot make forward progress due to exceeding the rmem
359 * limits. By rights, sk_clear_memalloc() should only be called
360 * on sockets being torn down but warn and reset the accounting if
361 * that assumption breaks.
362 */
363 if (WARN_ON(sk->sk_forward_alloc))
364 sk_mem_reclaim(sk);
365 }
366 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
367
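/*
 * Illustrative sketch (not part of this file): a swap-over-network style
 * user would typically flag its transport socket right after creating it
 * and clear the flag again before tearing it down; "xmit_sock" is a
 * hypothetical struct socket pointer.
 *
 *	sk_set_memalloc(xmit_sock->sk);
 *	... transmit swap pages over xmit_sock ...
 *	sk_clear_memalloc(xmit_sock->sk);
 *	sock_release(xmit_sock);
 */
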
368 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
369 {
370 int ret;
371 unsigned long pflags = current->flags;
372
373 /* these should have been dropped before queueing */
374 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
375
376 current->flags |= PF_MEMALLOC;
377 ret = sk->sk_backlog_rcv(sk, skb);
378 tsk_restore_flags(current, pflags, PF_MEMALLOC);
379
380 return ret;
381 }
382 EXPORT_SYMBOL(__sk_backlog_rcv);
383
384 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
385 {
386 struct timeval tv;
387
388 if (optlen < sizeof(tv))
389 return -EINVAL;
390 if (copy_from_user(&tv, optval, sizeof(tv)))
391 return -EFAULT;
392 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
393 return -EDOM;
394
395 if (tv.tv_sec < 0) {
396 static int warned __read_mostly;
397
398 *timeo_p = 0;
399 if (warned < 10 && net_ratelimit()) {
400 warned++;
401 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
402 __func__, current->comm, task_pid_nr(current));
403 }
404 return 0;
405 }
406 *timeo_p = MAX_SCHEDULE_TIMEOUT;
407 if (tv.tv_sec == 0 && tv.tv_usec == 0)
408 return 0;
409 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
410 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
411 return 0;
412 }
413
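/*
 * Worked example for the conversion above, assuming HZ == 100 (one jiffy
 * is 10000 usec): a requested timeout of { .tv_sec = 1, .tv_usec = 5000 }
 * becomes 1 * 100 + (5000 + 9999) / 10000 = 101 jiffies, i.e. fractions of
 * a jiffy are rounded up so the caller never sleeps for less than asked.
 */
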
414 static void sock_warn_obsolete_bsdism(const char *name)
415 {
416 static int warned;
417 static char warncomm[TASK_COMM_LEN];
418 if (strcmp(warncomm, current->comm) && warned < 5) {
419 strcpy(warncomm, current->comm);
420 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
421 warncomm, name);
422 warned++;
423 }
424 }
425
426 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
427
428 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
429 {
430 if (sk->sk_flags & flags) {
431 sk->sk_flags &= ~flags;
432 if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
433 net_disable_timestamp();
434 }
435 }
436
437
438 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
439 {
440 int err;
441 int skb_len;
442 unsigned long flags;
443 struct sk_buff_head *list = &sk->sk_receive_queue;
444
445 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
446 atomic_inc(&sk->sk_drops);
447 trace_sock_rcvqueue_full(sk, skb);
448 return -ENOMEM;
449 }
450
451 err = sk_filter(sk, skb);
452 if (err)
453 return err;
454
455 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
456 atomic_inc(&sk->sk_drops);
457 return -ENOBUFS;
458 }
459
460 skb->dev = NULL;
461 skb_set_owner_r(skb, sk);
462
463 /* Cache the SKB length before we tack it onto the receive
464 * queue. Once it is added it no longer belongs to us and
465 * may be freed by other threads of control pulling packets
466 * from the queue.
467 */
468 skb_len = skb->len;
469
 470 	/* we escape from the RCU-protected region, make sure we don't leak
 471 	 * a non-refcounted dst
472 */
473 skb_dst_force(skb);
474
475 spin_lock_irqsave(&list->lock, flags);
476 skb->dropcount = atomic_read(&sk->sk_drops);
477 __skb_queue_tail(list, skb);
478 spin_unlock_irqrestore(&list->lock, flags);
479
480 if (!sock_flag(sk, SOCK_DEAD))
481 sk->sk_data_ready(sk, skb_len);
482 return 0;
483 }
484 EXPORT_SYMBOL(sock_queue_rcv_skb);
485
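/*
 * Illustrative sketch (not part of this file): a datagram protocol's input
 * path typically hands each matched skb to sock_queue_rcv_skb() and frees
 * the skb itself on failure; "example_rcv" is a hypothetical name.
 *
 *	static int example_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */
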
486 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
487 {
488 int rc = NET_RX_SUCCESS;
489
490 if (sk_filter(sk, skb))
491 goto discard_and_relse;
492
493 skb->dev = NULL;
494
495 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
496 atomic_inc(&sk->sk_drops);
497 goto discard_and_relse;
498 }
499 if (nested)
500 bh_lock_sock_nested(sk);
501 else
502 bh_lock_sock(sk);
503 if (!sock_owned_by_user(sk)) {
504 /*
505 * trylock + unlock semantics:
506 */
507 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
508
509 rc = sk_backlog_rcv(sk, skb);
510
511 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
512 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
513 bh_unlock_sock(sk);
514 atomic_inc(&sk->sk_drops);
515 goto discard_and_relse;
516 }
517
518 bh_unlock_sock(sk);
519 out:
520 sock_put(sk);
521 return rc;
522 discard_and_relse:
523 kfree_skb(skb);
524 goto out;
525 }
526 EXPORT_SYMBOL(sk_receive_skb);
527
528 void sk_reset_txq(struct sock *sk)
529 {
530 sk_tx_queue_clear(sk);
531 }
532 EXPORT_SYMBOL(sk_reset_txq);
533
534 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
535 {
536 struct dst_entry *dst = __sk_dst_get(sk);
537
538 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
539 sk_tx_queue_clear(sk);
540 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
541 dst_release(dst);
542 return NULL;
543 }
544
545 return dst;
546 }
547 EXPORT_SYMBOL(__sk_dst_check);
548
549 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
550 {
551 struct dst_entry *dst = sk_dst_get(sk);
552
553 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
554 sk_dst_reset(sk);
555 dst_release(dst);
556 return NULL;
557 }
558
559 return dst;
560 }
561 EXPORT_SYMBOL(sk_dst_check);
562
563 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
564 int optlen)
565 {
566 int ret = -ENOPROTOOPT;
567 #ifdef CONFIG_NETDEVICES
568 struct net *net = sock_net(sk);
569 char devname[IFNAMSIZ];
570 int index;
571
572 /* Sorry... */
573 ret = -EPERM;
574 if (!ns_capable(net->user_ns, CAP_NET_RAW))
575 goto out;
576
577 ret = -EINVAL;
578 if (optlen < 0)
579 goto out;
580
581 /* Bind this socket to a particular device like "eth0",
582 * as specified in the passed interface name. If the
583 * name is "" or the option length is zero the socket
584 * is not bound.
585 */
586 if (optlen > IFNAMSIZ - 1)
587 optlen = IFNAMSIZ - 1;
588 memset(devname, 0, sizeof(devname));
589
590 ret = -EFAULT;
591 if (copy_from_user(devname, optval, optlen))
592 goto out;
593
594 index = 0;
595 if (devname[0] != '\0') {
596 struct net_device *dev;
597
598 rcu_read_lock();
599 dev = dev_get_by_name_rcu(net, devname);
600 if (dev)
601 index = dev->ifindex;
602 rcu_read_unlock();
603 ret = -ENODEV;
604 if (!dev)
605 goto out;
606 }
607
608 lock_sock(sk);
609 sk->sk_bound_dev_if = index;
610 sk_dst_reset(sk);
611 release_sock(sk);
612
613 ret = 0;
614
615 out:
616 #endif
617
618 return ret;
619 }
620
621 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
622 int __user *optlen, int len)
623 {
624 int ret = -ENOPROTOOPT;
625 #ifdef CONFIG_NETDEVICES
626 struct net *net = sock_net(sk);
627 char devname[IFNAMSIZ];
628
629 if (sk->sk_bound_dev_if == 0) {
630 len = 0;
631 goto zero;
632 }
633
634 ret = -EINVAL;
635 if (len < IFNAMSIZ)
636 goto out;
637
638 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
639 if (ret)
640 goto out;
641
642 len = strlen(devname) + 1;
643
644 ret = -EFAULT;
645 if (copy_to_user(optval, devname, len))
646 goto out;
647
648 zero:
649 ret = -EFAULT;
650 if (put_user(len, optlen))
651 goto out;
652
653 ret = 0;
654
655 out:
656 #endif
657
658 return ret;
659 }
660
661 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
662 {
663 if (valbool)
664 sock_set_flag(sk, bit);
665 else
666 sock_reset_flag(sk, bit);
667 }
668
669 /*
670 * This is meant for all protocols to use and covers goings on
671 * at the socket level. Everything here is generic.
672 */
673
674 int sock_setsockopt(struct socket *sock, int level, int optname,
675 char __user *optval, unsigned int optlen)
676 {
677 struct sock *sk = sock->sk;
678 int val;
679 int valbool;
680 struct linger ling;
681 int ret = 0;
682
683 /*
684 * Options without arguments
685 */
686
687 if (optname == SO_BINDTODEVICE)
688 return sock_setbindtodevice(sk, optval, optlen);
689
690 if (optlen < sizeof(int))
691 return -EINVAL;
692
693 if (get_user(val, (int __user *)optval))
694 return -EFAULT;
695
696 valbool = val ? 1 : 0;
697
698 lock_sock(sk);
699
700 switch (optname) {
701 case SO_DEBUG:
702 if (val && !capable(CAP_NET_ADMIN))
703 ret = -EACCES;
704 else
705 sock_valbool_flag(sk, SOCK_DBG, valbool);
706 break;
707 case SO_REUSEADDR:
708 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
709 break;
710 case SO_REUSEPORT:
711 sk->sk_reuseport = valbool;
712 break;
713 case SO_TYPE:
714 case SO_PROTOCOL:
715 case SO_DOMAIN:
716 case SO_ERROR:
717 ret = -ENOPROTOOPT;
718 break;
719 case SO_DONTROUTE:
720 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
721 break;
722 case SO_BROADCAST:
723 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
724 break;
725 case SO_SNDBUF:
 726 		/* Don't return an error on this; BSD doesn't, and if you
 727 		 * think about it, this is right. Otherwise apps would have to
 728 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 729 		 * are treated in BSD as hints.
730 */
731 val = min_t(u32, val, sysctl_wmem_max);
732 set_sndbuf:
733 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
734 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
735 /* Wake up sending tasks if we upped the value. */
736 sk->sk_write_space(sk);
737 break;
738
739 case SO_SNDBUFFORCE:
740 if (!capable(CAP_NET_ADMIN)) {
741 ret = -EPERM;
742 break;
743 }
744 goto set_sndbuf;
745
746 case SO_RCVBUF:
 747 		/* Don't return an error on this; BSD doesn't, and if you
 748 		 * think about it, this is right. Otherwise apps would have to
 749 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 750 		 * are treated in BSD as hints.
751 */
752 val = min_t(u32, val, sysctl_rmem_max);
753 set_rcvbuf:
754 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
755 /*
756 * We double it on the way in to account for
757 * "struct sk_buff" etc. overhead. Applications
758 * assume that the SO_RCVBUF setting they make will
759 * allow that much actual data to be received on that
760 * socket.
761 *
762 * Applications are unaware that "struct sk_buff" and
763 * other overheads allocate from the receive buffer
764 * during socket buffer allocation.
765 *
766 * And after considering the possible alternatives,
767 * returning the value we actually used in getsockopt
768 * is the most desirable behavior.
769 */
770 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
771 break;
772
773 case SO_RCVBUFFORCE:
774 if (!capable(CAP_NET_ADMIN)) {
775 ret = -EPERM;
776 break;
777 }
778 goto set_rcvbuf;
779
780 case SO_KEEPALIVE:
781 #ifdef CONFIG_INET
782 if (sk->sk_protocol == IPPROTO_TCP &&
783 sk->sk_type == SOCK_STREAM)
784 tcp_set_keepalive(sk, valbool);
785 #endif
786 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
787 break;
788
789 case SO_OOBINLINE:
790 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
791 break;
792
793 case SO_NO_CHECK:
794 sk->sk_no_check = valbool;
795 break;
796
797 case SO_PRIORITY:
798 if ((val >= 0 && val <= 6) ||
799 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
800 sk->sk_priority = val;
801 else
802 ret = -EPERM;
803 break;
804
805 case SO_LINGER:
806 if (optlen < sizeof(ling)) {
807 ret = -EINVAL; /* 1003.1g */
808 break;
809 }
810 if (copy_from_user(&ling, optval, sizeof(ling))) {
811 ret = -EFAULT;
812 break;
813 }
814 if (!ling.l_onoff)
815 sock_reset_flag(sk, SOCK_LINGER);
816 else {
817 #if (BITS_PER_LONG == 32)
818 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
819 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
820 else
821 #endif
822 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
823 sock_set_flag(sk, SOCK_LINGER);
824 }
825 break;
826
827 case SO_BSDCOMPAT:
828 sock_warn_obsolete_bsdism("setsockopt");
829 break;
830
831 case SO_PASSCRED:
832 if (valbool)
833 set_bit(SOCK_PASSCRED, &sock->flags);
834 else
835 clear_bit(SOCK_PASSCRED, &sock->flags);
836 break;
837
838 case SO_TIMESTAMP:
839 case SO_TIMESTAMPNS:
840 if (valbool) {
841 if (optname == SO_TIMESTAMP)
842 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
843 else
844 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
845 sock_set_flag(sk, SOCK_RCVTSTAMP);
846 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
847 } else {
848 sock_reset_flag(sk, SOCK_RCVTSTAMP);
849 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
850 }
851 break;
852
853 case SO_TIMESTAMPING:
854 if (val & ~SOF_TIMESTAMPING_MASK) {
855 ret = -EINVAL;
856 break;
857 }
858 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
859 val & SOF_TIMESTAMPING_TX_HARDWARE);
860 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
861 val & SOF_TIMESTAMPING_TX_SOFTWARE);
862 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
863 val & SOF_TIMESTAMPING_RX_HARDWARE);
864 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
865 sock_enable_timestamp(sk,
866 SOCK_TIMESTAMPING_RX_SOFTWARE);
867 else
868 sock_disable_timestamp(sk,
869 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
870 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
871 val & SOF_TIMESTAMPING_SOFTWARE);
872 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
873 val & SOF_TIMESTAMPING_SYS_HARDWARE);
874 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
875 val & SOF_TIMESTAMPING_RAW_HARDWARE);
876 break;
877
878 case SO_RCVLOWAT:
879 if (val < 0)
880 val = INT_MAX;
881 sk->sk_rcvlowat = val ? : 1;
882 break;
883
884 case SO_RCVTIMEO:
885 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
886 break;
887
888 case SO_SNDTIMEO:
889 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
890 break;
891
892 case SO_ATTACH_FILTER:
893 ret = -EINVAL;
894 if (optlen == sizeof(struct sock_fprog)) {
895 struct sock_fprog fprog;
896
897 ret = -EFAULT;
898 if (copy_from_user(&fprog, optval, sizeof(fprog)))
899 break;
900
901 ret = sk_attach_filter(&fprog, sk);
902 }
903 break;
904
905 case SO_DETACH_FILTER:
906 ret = sk_detach_filter(sk);
907 break;
908
909 case SO_LOCK_FILTER:
910 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
911 ret = -EPERM;
912 else
913 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
914 break;
915
916 case SO_PASSSEC:
917 if (valbool)
918 set_bit(SOCK_PASSSEC, &sock->flags);
919 else
920 clear_bit(SOCK_PASSSEC, &sock->flags);
921 break;
922 case SO_MARK:
923 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
924 ret = -EPERM;
925 else
926 sk->sk_mark = val;
927 break;
928
929 /* We implement the SO_SNDLOWAT etc to
930 not be settable (1003.1g 5.3) */
931 case SO_RXQ_OVFL:
932 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
933 break;
934
935 case SO_WIFI_STATUS:
936 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
937 break;
938
939 case SO_PEEK_OFF:
940 if (sock->ops->set_peek_off)
941 ret = sock->ops->set_peek_off(sk, val);
942 else
943 ret = -EOPNOTSUPP;
944 break;
945
946 case SO_NOFCS:
947 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
948 break;
949
950 case SO_SELECT_ERR_QUEUE:
951 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
952 break;
953
954 default:
955 ret = -ENOPROTOOPT;
956 break;
957 }
958 release_sock(sk);
959 return ret;
960 }
961 EXPORT_SYMBOL(sock_setsockopt);
962
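/*
 * Illustrative userspace-side sketch of the SO_RCVBUF/SO_SNDBUF doubling
 * described above (not part of this file): asking for 64 kB and reading
 * the value back typically reports about 128 kB, clamped by
 * sysctl_rmem_max.
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 */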
963
964 void cred_to_ucred(struct pid *pid, const struct cred *cred,
965 struct ucred *ucred)
966 {
967 ucred->pid = pid_vnr(pid);
968 ucred->uid = ucred->gid = -1;
969 if (cred) {
970 struct user_namespace *current_ns = current_user_ns();
971
972 ucred->uid = from_kuid_munged(current_ns, cred->euid);
973 ucred->gid = from_kgid_munged(current_ns, cred->egid);
974 }
975 }
976 EXPORT_SYMBOL_GPL(cred_to_ucred);
977
978 int sock_getsockopt(struct socket *sock, int level, int optname,
979 char __user *optval, int __user *optlen)
980 {
981 struct sock *sk = sock->sk;
982
983 union {
984 int val;
985 struct linger ling;
986 struct timeval tm;
987 } v;
988
989 int lv = sizeof(int);
990 int len;
991
992 if (get_user(len, optlen))
993 return -EFAULT;
994 if (len < 0)
995 return -EINVAL;
996
997 memset(&v, 0, sizeof(v));
998
999 switch (optname) {
1000 case SO_DEBUG:
1001 v.val = sock_flag(sk, SOCK_DBG);
1002 break;
1003
1004 case SO_DONTROUTE:
1005 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1006 break;
1007
1008 case SO_BROADCAST:
1009 v.val = sock_flag(sk, SOCK_BROADCAST);
1010 break;
1011
1012 case SO_SNDBUF:
1013 v.val = sk->sk_sndbuf;
1014 break;
1015
1016 case SO_RCVBUF:
1017 v.val = sk->sk_rcvbuf;
1018 break;
1019
1020 case SO_REUSEADDR:
1021 v.val = sk->sk_reuse;
1022 break;
1023
1024 case SO_REUSEPORT:
1025 v.val = sk->sk_reuseport;
1026 break;
1027
1028 case SO_KEEPALIVE:
1029 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1030 break;
1031
1032 case SO_TYPE:
1033 v.val = sk->sk_type;
1034 break;
1035
1036 case SO_PROTOCOL:
1037 v.val = sk->sk_protocol;
1038 break;
1039
1040 case SO_DOMAIN:
1041 v.val = sk->sk_family;
1042 break;
1043
1044 case SO_ERROR:
1045 v.val = -sock_error(sk);
1046 if (v.val == 0)
1047 v.val = xchg(&sk->sk_err_soft, 0);
1048 break;
1049
1050 case SO_OOBINLINE:
1051 v.val = sock_flag(sk, SOCK_URGINLINE);
1052 break;
1053
1054 case SO_NO_CHECK:
1055 v.val = sk->sk_no_check;
1056 break;
1057
1058 case SO_PRIORITY:
1059 v.val = sk->sk_priority;
1060 break;
1061
1062 case SO_LINGER:
1063 lv = sizeof(v.ling);
1064 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1065 v.ling.l_linger = sk->sk_lingertime / HZ;
1066 break;
1067
1068 case SO_BSDCOMPAT:
1069 sock_warn_obsolete_bsdism("getsockopt");
1070 break;
1071
1072 case SO_TIMESTAMP:
1073 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1074 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1075 break;
1076
1077 case SO_TIMESTAMPNS:
1078 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1079 break;
1080
1081 case SO_TIMESTAMPING:
1082 v.val = 0;
1083 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
1084 v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
1085 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
1086 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
1087 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
1088 v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
1089 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1090 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
1091 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
1092 v.val |= SOF_TIMESTAMPING_SOFTWARE;
1093 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
1094 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
1095 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
1096 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
1097 break;
1098
1099 case SO_RCVTIMEO:
1100 lv = sizeof(struct timeval);
1101 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1102 v.tm.tv_sec = 0;
1103 v.tm.tv_usec = 0;
1104 } else {
1105 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1106 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1107 }
1108 break;
1109
1110 case SO_SNDTIMEO:
1111 lv = sizeof(struct timeval);
1112 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1113 v.tm.tv_sec = 0;
1114 v.tm.tv_usec = 0;
1115 } else {
1116 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1117 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1118 }
1119 break;
1120
1121 case SO_RCVLOWAT:
1122 v.val = sk->sk_rcvlowat;
1123 break;
1124
1125 case SO_SNDLOWAT:
1126 v.val = 1;
1127 break;
1128
1129 case SO_PASSCRED:
1130 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1131 break;
1132
1133 case SO_PEERCRED:
1134 {
1135 struct ucred peercred;
1136 if (len > sizeof(peercred))
1137 len = sizeof(peercred);
1138 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1139 if (copy_to_user(optval, &peercred, len))
1140 return -EFAULT;
1141 goto lenout;
1142 }
1143
1144 case SO_PEERNAME:
1145 {
1146 char address[128];
1147
1148 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1149 return -ENOTCONN;
1150 if (lv < len)
1151 return -EINVAL;
1152 if (copy_to_user(optval, address, len))
1153 return -EFAULT;
1154 goto lenout;
1155 }
1156
1157 /* Dubious BSD thing... Probably nobody even uses it, but
1158 * the UNIX standard wants it for whatever reason... -DaveM
1159 */
1160 case SO_ACCEPTCONN:
1161 v.val = sk->sk_state == TCP_LISTEN;
1162 break;
1163
1164 case SO_PASSSEC:
1165 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1166 break;
1167
1168 case SO_PEERSEC:
1169 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1170
1171 case SO_MARK:
1172 v.val = sk->sk_mark;
1173 break;
1174
1175 case SO_RXQ_OVFL:
1176 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1177 break;
1178
1179 case SO_WIFI_STATUS:
1180 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1181 break;
1182
1183 case SO_PEEK_OFF:
1184 if (!sock->ops->set_peek_off)
1185 return -EOPNOTSUPP;
1186
1187 v.val = sk->sk_peek_off;
1188 break;
1189 case SO_NOFCS:
1190 v.val = sock_flag(sk, SOCK_NOFCS);
1191 break;
1192
1193 case SO_BINDTODEVICE:
1194 return sock_getbindtodevice(sk, optval, optlen, len);
1195
1196 case SO_GET_FILTER:
1197 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1198 if (len < 0)
1199 return len;
1200
1201 goto lenout;
1202
1203 case SO_LOCK_FILTER:
1204 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1205 break;
1206
1207 case SO_SELECT_ERR_QUEUE:
1208 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1209 break;
1210
1211 default:
1212 return -ENOPROTOOPT;
1213 }
1214
1215 if (len > lv)
1216 len = lv;
1217 if (copy_to_user(optval, &v, len))
1218 return -EFAULT;
1219 lenout:
1220 if (put_user(len, optlen))
1221 return -EFAULT;
1222 return 0;
1223 }
1224
1225 /*
1226 * Initialize an sk_lock.
1227 *
1228 * (We also register the sk_lock with the lock validator.)
1229 */
1230 static inline void sock_lock_init(struct sock *sk)
1231 {
1232 sock_lock_init_class_and_name(sk,
1233 af_family_slock_key_strings[sk->sk_family],
1234 af_family_slock_keys + sk->sk_family,
1235 af_family_key_strings[sk->sk_family],
1236 af_family_keys + sk->sk_family);
1237 }
1238
1239 /*
1240 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 1241  * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 1242  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1243 */
1244 static void sock_copy(struct sock *nsk, const struct sock *osk)
1245 {
1246 #ifdef CONFIG_SECURITY_NETWORK
1247 void *sptr = nsk->sk_security;
1248 #endif
1249 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1250
1251 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1252 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1253
1254 #ifdef CONFIG_SECURITY_NETWORK
1255 nsk->sk_security = sptr;
1256 security_sk_clone(osk, nsk);
1257 #endif
1258 }
1259
1260 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1261 {
1262 unsigned long nulls1, nulls2;
1263
1264 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1265 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1266 if (nulls1 > nulls2)
1267 swap(nulls1, nulls2);
1268
1269 if (nulls1 != 0)
1270 memset((char *)sk, 0, nulls1);
1271 memset((char *)sk + nulls1 + sizeof(void *), 0,
1272 nulls2 - nulls1 - sizeof(void *));
1273 memset((char *)sk + nulls2 + sizeof(void *), 0,
1274 size - nulls2 - sizeof(void *));
1275 }
1276 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1277
1278 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1279 int family)
1280 {
1281 struct sock *sk;
1282 struct kmem_cache *slab;
1283
1284 slab = prot->slab;
1285 if (slab != NULL) {
1286 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1287 if (!sk)
1288 return sk;
1289 if (priority & __GFP_ZERO) {
1290 if (prot->clear_sk)
1291 prot->clear_sk(sk, prot->obj_size);
1292 else
1293 sk_prot_clear_nulls(sk, prot->obj_size);
1294 }
1295 } else
1296 sk = kmalloc(prot->obj_size, priority);
1297
1298 if (sk != NULL) {
1299 kmemcheck_annotate_bitfield(sk, flags);
1300
1301 if (security_sk_alloc(sk, family, priority))
1302 goto out_free;
1303
1304 if (!try_module_get(prot->owner))
1305 goto out_free_sec;
1306 sk_tx_queue_clear(sk);
1307 }
1308
1309 return sk;
1310
1311 out_free_sec:
1312 security_sk_free(sk);
1313 out_free:
1314 if (slab != NULL)
1315 kmem_cache_free(slab, sk);
1316 else
1317 kfree(sk);
1318 return NULL;
1319 }
1320
1321 static void sk_prot_free(struct proto *prot, struct sock *sk)
1322 {
1323 struct kmem_cache *slab;
1324 struct module *owner;
1325
1326 owner = prot->owner;
1327 slab = prot->slab;
1328
1329 security_sk_free(sk);
1330 if (slab != NULL)
1331 kmem_cache_free(slab, sk);
1332 else
1333 kfree(sk);
1334 module_put(owner);
1335 }
1336
1337 #if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
1338 void sock_update_classid(struct sock *sk)
1339 {
1340 u32 classid;
1341
1342 classid = task_cls_classid(current);
1343 if (classid != sk->sk_classid)
1344 sk->sk_classid = classid;
1345 }
1346 EXPORT_SYMBOL(sock_update_classid);
1347 #endif
1348
1349 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1350 void sock_update_netprioidx(struct sock *sk)
1351 {
1352 if (in_interrupt())
1353 return;
1354
1355 sk->sk_cgrp_prioidx = task_netprioidx(current);
1356 }
1357 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1358 #endif
1359
1360 /**
1361 * sk_alloc - All socket objects are allocated here
1362 * @net: the applicable net namespace
1363 * @family: protocol family
1364 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1365 * @prot: struct proto associated with this new sock instance
1366 */
1367 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1368 struct proto *prot)
1369 {
1370 struct sock *sk;
1371
1372 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1373 if (sk) {
1374 sk->sk_family = family;
1375 /*
1376 * See comment in struct sock definition to understand
1377 * why we need sk_prot_creator -acme
1378 */
1379 sk->sk_prot = sk->sk_prot_creator = prot;
1380 sock_lock_init(sk);
1381 sock_net_set(sk, get_net(net));
1382 atomic_set(&sk->sk_wmem_alloc, 1);
1383
1384 sock_update_classid(sk);
1385 sock_update_netprioidx(sk);
1386 }
1387
1388 return sk;
1389 }
1390 EXPORT_SYMBOL(sk_alloc);
1391
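/*
 * Illustrative sketch (not part of this file): an address family's
 * ->create() hook typically pairs sk_alloc() with sock_init_data().
 * "example_create", "example_proto" and AF_EXAMPLE are hypothetical names.
 *
 *	static int example_create(struct net *net, struct socket *sock,
 *				  int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, AF_EXAMPLE, GFP_KERNEL, &example_proto);
 *		if (!sk)
 *			return -ENOMEM;
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 */
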
1392 static void __sk_free(struct sock *sk)
1393 {
1394 struct sk_filter *filter;
1395
1396 if (sk->sk_destruct)
1397 sk->sk_destruct(sk);
1398
1399 filter = rcu_dereference_check(sk->sk_filter,
1400 atomic_read(&sk->sk_wmem_alloc) == 0);
1401 if (filter) {
1402 sk_filter_uncharge(sk, filter);
1403 RCU_INIT_POINTER(sk->sk_filter, NULL);
1404 }
1405
1406 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1407
1408 if (atomic_read(&sk->sk_omem_alloc))
1409 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1410 __func__, atomic_read(&sk->sk_omem_alloc));
1411
1412 if (sk->sk_peer_cred)
1413 put_cred(sk->sk_peer_cred);
1414 put_pid(sk->sk_peer_pid);
1415 put_net(sock_net(sk));
1416 sk_prot_free(sk->sk_prot_creator, sk);
1417 }
1418
1419 void sk_free(struct sock *sk)
1420 {
1421 /*
 1422 	 * We subtract one from sk_wmem_alloc so we can tell whether some
 1423 	 * packets are still in some tx queue.
 1424 	 * If it is not zero, sock_wfree() will call __sk_free(sk) later.
1425 */
1426 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1427 __sk_free(sk);
1428 }
1429 EXPORT_SYMBOL(sk_free);
1430
1431 /*
 1432  * The last sock_put should drop the reference to sk->sk_net. It has already
 1433  * been dropped in sk_change_net. Taking a reference to the stopping namespace
 1434  * is not an option.
 1435  * Take a reference to the socket to remove it from the hash while still
 1436  * _alive_, and after that destroy it in the context of init_net.
1437 */
1438 void sk_release_kernel(struct sock *sk)
1439 {
1440 if (sk == NULL || sk->sk_socket == NULL)
1441 return;
1442
1443 sock_hold(sk);
1444 sock_release(sk->sk_socket);
1445 release_net(sock_net(sk));
1446 sock_net_set(sk, get_net(&init_net));
1447 sock_put(sk);
1448 }
1449 EXPORT_SYMBOL(sk_release_kernel);
1450
1451 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1452 {
1453 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1454 sock_update_memcg(newsk);
1455 }
1456
1457 /**
1458 * sk_clone_lock - clone a socket, and lock its clone
1459 * @sk: the socket to clone
1460 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1461 *
1462 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1463 */
1464 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1465 {
1466 struct sock *newsk;
1467
1468 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1469 if (newsk != NULL) {
1470 struct sk_filter *filter;
1471
1472 sock_copy(newsk, sk);
1473
1474 /* SANITY */
1475 get_net(sock_net(newsk));
1476 sk_node_init(&newsk->sk_node);
1477 sock_lock_init(newsk);
1478 bh_lock_sock(newsk);
1479 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1480 newsk->sk_backlog.len = 0;
1481
1482 atomic_set(&newsk->sk_rmem_alloc, 0);
1483 /*
1484 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1485 */
1486 atomic_set(&newsk->sk_wmem_alloc, 1);
1487 atomic_set(&newsk->sk_omem_alloc, 0);
1488 skb_queue_head_init(&newsk->sk_receive_queue);
1489 skb_queue_head_init(&newsk->sk_write_queue);
1490 #ifdef CONFIG_NET_DMA
1491 skb_queue_head_init(&newsk->sk_async_wait_queue);
1492 #endif
1493
1494 spin_lock_init(&newsk->sk_dst_lock);
1495 rwlock_init(&newsk->sk_callback_lock);
1496 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1497 af_callback_keys + newsk->sk_family,
1498 af_family_clock_key_strings[newsk->sk_family]);
1499
1500 newsk->sk_dst_cache = NULL;
1501 newsk->sk_wmem_queued = 0;
1502 newsk->sk_forward_alloc = 0;
1503 newsk->sk_send_head = NULL;
1504 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1505
1506 sock_reset_flag(newsk, SOCK_DONE);
1507 skb_queue_head_init(&newsk->sk_error_queue);
1508
1509 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1510 if (filter != NULL)
1511 sk_filter_charge(newsk, filter);
1512
1513 if (unlikely(xfrm_sk_clone_policy(newsk))) {
 1514 			/* It is still a raw copy of the parent, so invalidate
 1515 			 * the destructor and do a plain sk_free() */
1516 newsk->sk_destruct = NULL;
1517 bh_unlock_sock(newsk);
1518 sk_free(newsk);
1519 newsk = NULL;
1520 goto out;
1521 }
1522
1523 newsk->sk_err = 0;
1524 newsk->sk_priority = 0;
1525 /*
1526 * Before updating sk_refcnt, we must commit prior changes to memory
1527 * (Documentation/RCU/rculist_nulls.txt for details)
1528 */
1529 smp_wmb();
1530 atomic_set(&newsk->sk_refcnt, 2);
1531
1532 /*
1533 * Increment the counter in the same struct proto as the master
1534 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1535 * is the same as sk->sk_prot->socks, as this field was copied
1536 * with memcpy).
1537 *
 1538 		 * This _changes_ the previous behaviour, where
 1539 		 * tcp_create_openreq_child always incremented the
 1540 		 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
 1541 		 * to be taken into account in all callers. -acme
1542 */
1543 sk_refcnt_debug_inc(newsk);
1544 sk_set_socket(newsk, NULL);
1545 newsk->sk_wq = NULL;
1546
1547 sk_update_clone(sk, newsk);
1548
1549 if (newsk->sk_prot->sockets_allocated)
1550 sk_sockets_allocated_inc(newsk);
1551
1552 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1553 net_enable_timestamp();
1554 }
1555 out:
1556 return newsk;
1557 }
1558 EXPORT_SYMBOL_GPL(sk_clone_lock);
1559
1560 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1561 {
1562 __sk_dst_set(sk, dst);
1563 sk->sk_route_caps = dst->dev->features;
1564 if (sk->sk_route_caps & NETIF_F_GSO)
1565 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1566 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1567 if (sk_can_gso(sk)) {
1568 if (dst->header_len) {
1569 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1570 } else {
1571 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1572 sk->sk_gso_max_size = dst->dev->gso_max_size;
1573 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1574 }
1575 }
1576 }
1577 EXPORT_SYMBOL_GPL(sk_setup_caps);
1578
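/*
 * Illustrative sketch (not part of this file): connection-oriented
 * protocols typically call sk_setup_caps() once a route has been resolved
 * at connect time, roughly:
 *
 *	rt = ... route lookup for the destination ...;
 *	sk_setup_caps(sk, &rt->dst);
 */
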
1579 /*
1580 * Simple resource managers for sockets.
1581 */
1582
1583
1584 /*
1585 * Write buffer destructor automatically called from kfree_skb.
1586 */
1587 void sock_wfree(struct sk_buff *skb)
1588 {
1589 struct sock *sk = skb->sk;
1590 unsigned int len = skb->truesize;
1591
1592 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1593 /*
 1594 		 * Keep a reference on sk_wmem_alloc; it will be released
 1595 		 * after the sk_write_space() call
1596 */
1597 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1598 sk->sk_write_space(sk);
1599 len = 1;
1600 }
1601 /*
1602 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1603 * could not do because of in-flight packets
1604 */
1605 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1606 __sk_free(sk);
1607 }
1608 EXPORT_SYMBOL(sock_wfree);
1609
1610 /*
1611 * Read buffer destructor automatically called from kfree_skb.
1612 */
1613 void sock_rfree(struct sk_buff *skb)
1614 {
1615 struct sock *sk = skb->sk;
1616 unsigned int len = skb->truesize;
1617
1618 atomic_sub(len, &sk->sk_rmem_alloc);
1619 sk_mem_uncharge(sk, len);
1620 }
1621 EXPORT_SYMBOL(sock_rfree);
1622
1623 void sock_edemux(struct sk_buff *skb)
1624 {
1625 struct sock *sk = skb->sk;
1626
1627 #ifdef CONFIG_INET
1628 if (sk->sk_state == TCP_TIME_WAIT)
1629 inet_twsk_put(inet_twsk(sk));
1630 else
1631 #endif
1632 sock_put(sk);
1633 }
1634 EXPORT_SYMBOL(sock_edemux);
1635
1636 kuid_t sock_i_uid(struct sock *sk)
1637 {
1638 kuid_t uid;
1639
 1640 	/* mtk_net: guard against a NULL sk */
1641 if (!sk) {
1642 pr_info("sk == NULL for sock_i_uid\n");
1643 return GLOBAL_ROOT_UID;
1644 }
1645
1646 read_lock_bh(&sk->sk_callback_lock);
1647 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1648 read_unlock_bh(&sk->sk_callback_lock);
1649 return uid;
1650 }
1651 EXPORT_SYMBOL(sock_i_uid);
1652
1653 unsigned long sock_i_ino(struct sock *sk)
1654 {
1655 unsigned long ino;
1656
1657 read_lock_bh(&sk->sk_callback_lock);
1658 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1659 read_unlock_bh(&sk->sk_callback_lock);
1660 return ino;
1661 }
1662 EXPORT_SYMBOL(sock_i_ino);
1663
1664 /*
1665 * Allocate a skb from the socket's send buffer.
1666 */
1667 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1668 gfp_t priority)
1669 {
1670 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1671 struct sk_buff *skb = alloc_skb(size, priority);
1672 if (skb) {
1673 skb_set_owner_w(skb, sk);
1674 return skb;
1675 }
1676 }
1677 return NULL;
1678 }
1679 EXPORT_SYMBOL(sock_wmalloc);
1680
1681 /*
1682 * Allocate a skb from the socket's receive buffer.
1683 */
1684 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1685 gfp_t priority)
1686 {
1687 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1688 struct sk_buff *skb = alloc_skb(size, priority);
1689 if (skb) {
1690 skb_set_owner_r(skb, sk);
1691 return skb;
1692 }
1693 }
1694 return NULL;
1695 }
1696
1697 /*
1698 * Allocate a memory block from the socket's option memory buffer.
1699 */
1700 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1701 {
1702 if ((unsigned int)size <= sysctl_optmem_max &&
1703 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1704 void *mem;
1705 /* First do the add, to avoid the race if kmalloc
1706 * might sleep.
1707 */
1708 atomic_add(size, &sk->sk_omem_alloc);
1709 mem = kmalloc(size, priority);
1710 if (mem)
1711 return mem;
1712 atomic_sub(size, &sk->sk_omem_alloc);
1713 }
1714 return NULL;
1715 }
1716 EXPORT_SYMBOL(sock_kmalloc);
1717
1718 /*
1719 * Free an option memory block.
1720 */
1721 void sock_kfree_s(struct sock *sk, void *mem, int size)
1722 {
1723 kfree(mem);
1724 atomic_sub(size, &sk->sk_omem_alloc);
1725 }
1726 EXPORT_SYMBOL(sock_kfree_s);
1727
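/*
 * Illustrative sketch (not part of this file): option memory is charged
 * and uncharged in matching pairs; "opt" and "optlen" are hypothetical.
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	... use opt ...
 *	sock_kfree_s(sk, opt, optlen);
 */
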
1728 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 1729    I think these locks should be removed for datagram sockets.
1730 */
1731 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1732 {
1733 DEFINE_WAIT(wait);
1734
1735 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1736 for (;;) {
1737 if (!timeo)
1738 break;
1739 if (signal_pending(current))
1740 break;
1741 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1742 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1743 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1744 break;
1745 if (sk->sk_shutdown & SEND_SHUTDOWN)
1746 break;
1747 if (sk->sk_err)
1748 break;
1749 timeo = schedule_timeout(timeo);
1750 }
1751 finish_wait(sk_sleep(sk), &wait);
1752 return timeo;
1753 }
1754
1755
 1756 /* Debug helper: dump basic info about an AF_UNIX socket and its peer. */
1757
 1758 static int sock_dump_info(struct sock *sk)
 1759 {
 1760 	if (sk->sk_family == AF_UNIX) {
 1761 		struct unix_sock *u = unix_sk(sk);
 1762 		struct sock *other = NULL;
 1763 
 1764 		if (u->path.dentry != NULL) {
 1765 #ifdef CONFIG_MTK_NET_LOGGING
 1766 			printk(KERN_INFO "[mtk_net][sock]sockdbg: socket-Name:%s\n",
 1767 			       u->path.dentry->d_iname);
 1768 #endif
 1769 		} else {
 1770 #ifdef CONFIG_MTK_NET_LOGGING
 1771 			printk(KERN_INFO "[mtk_net][sock]sockdbg: socket Name (NULL)\n");
 1772 #endif
 1773 		}
 1774 
 1775 		if (sk->sk_socket && SOCK_INODE(sk->sk_socket)) {
 1776 #ifdef CONFIG_MTK_NET_LOGGING
 1777 			printk(KERN_INFO "[mtk_net][sock]sockdbg: socket Inode[%lu]\n",
 1778 			       SOCK_INODE(sk->sk_socket)->i_ino);
 1779 #endif
 1780 		}
 1781 
 1782 		other = unix_sk(sk)->peer;
 1783 		if (!other) {
 1784 #ifdef CONFIG_MTK_NET_LOGGING
 1785 			printk(KERN_INFO "[mtk_net][sock]sockdbg: peer is (NULL)\n");
 1786 #endif
 1787 		} else {
 1788 			if (unix_sk(other)->path.dentry != NULL) {
 1789 #ifdef CONFIG_MTK_NET_LOGGING
 1790 				printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Name:%s\n",
 1791 				       unix_sk(other)->path.dentry->d_iname);
 1792 #endif
 1793 			} else {
 1794 #ifdef CONFIG_MTK_NET_LOGGING
 1795 				printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Name (NULL)\n");
 1796 #endif
 1797 			}
 1798 
 1799 			if (other->sk_socket && SOCK_INODE(other->sk_socket)) {
 1800 #ifdef CONFIG_MTK_NET_LOGGING
 1801 				printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Inode [%lu]\n",
 1802 				       SOCK_INODE(other->sk_socket)->i_ino);
 1803 #endif
 1804 			}
 1805 #ifdef CONFIG_MTK_NET_LOGGING
 1806 			printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Receive Queue len:%d\n",
 1807 			       other->sk_receive_queue.qlen);
 1808 #endif
 1809 #if 0
 1810 			/* Optional: hex-dump up to 127 bytes of the tail skb on
 1811 			 * the peer receive queue.
 1812 			 */
 1813 			{
 1814 				struct sk_buff *skb;
 1815 				char skbmsg[128];
 1816 
 1817 				skb = skb_peek_tail(&other->sk_receive_queue);
 1818 				if (skb == NULL) {
 1819 					printk(KERN_INFO "sockdbg: Peer Receive Queue is null (warning)\n");
 1820 				} else if (skb->len != 0 && skb->data != NULL) {
 1821 					int i, len = min_t(int, skb->len, 127);
 1822 
 1823 					for (i = 0; i < len; i++)
 1824 						sprintf(skbmsg + i, "%x", skb->data[i]);
 1825 					skbmsg[len] = '\0';
 1826 					printk(KERN_INFO "sockdbg: Peer Receive Queue dump(%d bytes):%s\n",
 1827 					       len, skbmsg);
 1828 				} else {
 1829 					printk(KERN_INFO "sockdbg: Peer Receive skb error\n");
 1830 				}
 1831 			}
 1832 #endif
 1833 		}
 1834 	}
 1835 
 1836 	return 0;
 1837 }
1858
1859
1860
1861 /*
1862 * Generic send/receive buffer handlers
1863 */
1864
1865 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1866 unsigned long data_len, int noblock,
1867 int *errcode)
1868 {
1869 struct sk_buff *skb;
1870 gfp_t gfp_mask;
1871 long timeo;
1872 int err;
1873 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1874
1875 err = -EMSGSIZE;
1876 if (npages > MAX_SKB_FRAGS)
1877 goto failure;
1878
1879 gfp_mask = sk->sk_allocation;
1880 if (gfp_mask & __GFP_WAIT)
1881 gfp_mask |= __GFP_REPEAT;
1882
1883 timeo = sock_sndtimeo(sk, noblock);
1884 while (1) {
1885 err = sock_error(sk);
1886 if (err != 0)
1887 goto failure;
1888
1889 err = -EPIPE;
1890 if (sk->sk_shutdown & SEND_SHUTDOWN)
1891 goto failure;
1892
1893 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1894 skb = alloc_skb(header_len, gfp_mask);
1895 if (skb) {
1896 int i;
1897
1898 /* No pages, we're done... */
1899 if (!data_len)
1900 break;
1901
1902 skb->truesize += data_len;
1903 skb_shinfo(skb)->nr_frags = npages;
1904 for (i = 0; i < npages; i++) {
1905 struct page *page;
1906
1907 page = alloc_pages(sk->sk_allocation, 0);
1908 if (!page) {
1909 err = -ENOBUFS;
1910 skb_shinfo(skb)->nr_frags = i;
1911 kfree_skb(skb);
1912 goto failure;
1913 }
1914
1915 __skb_fill_page_desc(skb, i,
1916 page, 0,
1917 (data_len >= PAGE_SIZE ?
1918 PAGE_SIZE :
1919 data_len));
1920 data_len -= PAGE_SIZE;
1921 }
1922
1923 /* Full success... */
1924 break;
1925 }
1926 err = -ENOBUFS;
1927 goto failure;
1928 }
1929 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1930 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1931 err = -EAGAIN;
1932 if (!timeo)
1933 goto failure;
1934 if (signal_pending(current))
1935 goto interrupted;
1936
1937 sock_dump_info(sk);
1938 #ifdef CONFIG_MTK_NET_LOGGING
1939 printk(KERN_INFO "[mtk_net][sock]sockdbg: wait_for_wmem, timeo =%ld, wmem =%d, snd buf =%d \n",
1940 timeo, atomic_read(&sk->sk_wmem_alloc), sk->sk_sndbuf);
1941 #endif
1942 timeo = sock_wait_for_wmem(sk, timeo);
1943 #ifdef CONFIG_MTK_NET_LOGGING
1944 printk(KERN_INFO "[mtk_net][sock]sockdbg: wait_for_wmem done, header_len=0x%lx, data_len=0x%lx,timeo =%ld \n",
1945 header_len, data_len ,timeo);
1946 #endif
1947 }
1948
1949 skb_set_owner_w(skb, sk);
1950 return skb;
1951
1952 interrupted:
1953 err = sock_intr_errno(timeo);
1954 failure:
1955 *errcode = err;
1956 return NULL;
1957 }
1958 EXPORT_SYMBOL(sock_alloc_send_pskb);
1959
1960 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1961 int noblock, int *errcode)
1962 {
1963 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1964 }
1965 EXPORT_SYMBOL(sock_alloc_send_skb);
1966
1967 /* On 32bit arches, an skb frag is limited to 2^15 */
1968 #define SKB_FRAG_PAGE_ORDER get_order(32768)
1969
1970 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1971 {
1972 int order;
1973
1974 if (pfrag->page) {
1975 if (atomic_read(&pfrag->page->_count) == 1) {
1976 pfrag->offset = 0;
1977 return true;
1978 }
1979 if (pfrag->offset < pfrag->size)
1980 return true;
1981 put_page(pfrag->page);
1982 }
1983
1984 /* We restrict high order allocations to users that can afford to wait */
1985 order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
1986
1987 do {
1988 gfp_t gfp = sk->sk_allocation;
1989
1990 if (order)
1991 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
1992 pfrag->page = alloc_pages(gfp, order);
1993 if (likely(pfrag->page)) {
1994 pfrag->offset = 0;
1995 pfrag->size = PAGE_SIZE << order;
1996 return true;
1997 }
1998 } while (--order >= 0);
1999
2000 sk_enter_memory_pressure(sk);
2001 sk_stream_moderate_sndbuf(sk);
2002 return false;
2003 }
2004 EXPORT_SYMBOL(sk_page_frag_refill);
2005
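/*
 * Illustrative sketch (not part of this file): senders that copy user data
 * into shared page fragments typically loop along these lines (abridged):
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	... copy "copy" bytes into pfrag->page at pfrag->offset ...
 *	pfrag->offset += copy;
 */
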
2006 static void __lock_sock(struct sock *sk)
2007 __releases(&sk->sk_lock.slock)
2008 __acquires(&sk->sk_lock.slock)
2009 {
2010 DEFINE_WAIT(wait);
2011
2012 for (;;) {
2013 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2014 TASK_UNINTERRUPTIBLE);
2015 spin_unlock_bh(&sk->sk_lock.slock);
2016 schedule();
2017 spin_lock_bh(&sk->sk_lock.slock);
2018 if (!sock_owned_by_user(sk))
2019 break;
2020 }
2021 finish_wait(&sk->sk_lock.wq, &wait);
2022 }
2023
2024 static void __release_sock(struct sock *sk)
2025 __releases(&sk->sk_lock.slock)
2026 __acquires(&sk->sk_lock.slock)
2027 {
2028 struct sk_buff *skb = sk->sk_backlog.head;
2029
2030 do {
2031 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2032 bh_unlock_sock(sk);
2033
2034 do {
2035 struct sk_buff *next = skb->next;
2036
2037 prefetch(next);
2038 WARN_ON_ONCE(skb_dst_is_noref(skb));
2039 skb->next = NULL;
2040 sk_backlog_rcv(sk, skb);
2041
2042 /*
2043 * We are in process context here with softirqs
2044 * disabled, use cond_resched_softirq() to preempt.
2045 * This is safe to do because we've taken the backlog
2046 * queue private:
2047 */
2048 cond_resched_softirq();
2049
2050 skb = next;
2051 } while (skb != NULL);
2052
2053 bh_lock_sock(sk);
2054 } while ((skb = sk->sk_backlog.head) != NULL);
2055
2056 /*
2057 * Doing the zeroing here guarantees we cannot loop forever
2058 * while a wild producer attempts to flood us.
2059 */
2060 sk->sk_backlog.len = 0;
2061 }
2062
2063 /**
2064 * sk_wait_data - wait for data to arrive at sk_receive_queue
2065 * @sk: sock to wait on
2066 * @timeo: how long to wait, in jiffies
2067 *
2068 * Now socket state including sk->sk_err is changed only under the lock,
2069 * hence we may omit checks after joining the wait queue.
2070 * We check the receive queue before schedule() only as an optimization;
2071 * it is very likely that release_sock() added new data.
2072 */
2073 int sk_wait_data(struct sock *sk, long *timeo)
2074 {
2075 int rc;
2076 DEFINE_WAIT(wait);
2077
2078 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2079 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2080 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
2081 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2082 finish_wait(sk_sleep(sk), &wait);
2083 return rc;
2084 }
2085 EXPORT_SYMBOL(sk_wait_data);
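/*
 * A minimal usage sketch (hypothetical helper, socket lock held by the
 * caller): a blocking recvmsg() implementation typically loops on
 * sk_wait_data() until a packet shows up, the timeout runs out, a signal
 * arrives, or the socket reports an error/shutdown.
 */
static inline struct sk_buff *example_wait_for_skb(struct sock *sk,
						   int noblock, int *err)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_receive_queue)) == NULL) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}
		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			*err = 0;		/* treat as end of stream */
			return NULL;
		}
		if (!timeo) {
			*err = -EAGAIN;		/* non-blocking or timed out */
			return NULL;
		}
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
		sk_wait_data(sk, &timeo);	/* drops/retakes the sock lock */
	}
	return skb;
}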
2086
2087 /**
2088 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2089 * @sk: socket
2090 * @size: memory size to allocate
2091 * @kind: allocation type
2092 *
2093 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2094 * rmem allocation. This function assumes that protocols which have
2095 * memory_pressure use sk_wmem_queued as write buffer accounting.
2096 */
2097 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2098 {
2099 struct proto *prot = sk->sk_prot;
2100 int amt = sk_mem_pages(size);
2101 long allocated;
2102 int parent_status = UNDER_LIMIT;
2103
2104 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2105
2106 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
2107
2108 /* Under limit. */
2109 if (parent_status == UNDER_LIMIT &&
2110 allocated <= sk_prot_mem_limits(sk, 0)) {
2111 sk_leave_memory_pressure(sk);
2112 return 1;
2113 }
2114
2115 /* Under pressure. (we or our parents) */
2116 if ((parent_status > SOFT_LIMIT) ||
2117 allocated > sk_prot_mem_limits(sk, 1))
2118 sk_enter_memory_pressure(sk);
2119
2120 /* Over hard limit (we or our parents) */
2121 if ((parent_status == OVER_LIMIT) ||
2122 (allocated > sk_prot_mem_limits(sk, 2)))
2123 goto suppress_allocation;
2124
2125 /* guarantee minimum buffer size under pressure */
2126 if (kind == SK_MEM_RECV) {
2127 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2128 return 1;
2129
2130 } else { /* SK_MEM_SEND */
2131 if (sk->sk_type == SOCK_STREAM) {
2132 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2133 return 1;
2134 } else if (atomic_read(&sk->sk_wmem_alloc) <
2135 prot->sysctl_wmem[0])
2136 return 1;
2137 }
2138
2139 if (sk_has_memory_pressure(sk)) {
2140 int alloc;
2141
2142 if (!sk_under_memory_pressure(sk))
2143 return 1;
2144 alloc = sk_sockets_allocated_read_positive(sk);
2145 if (sk_prot_mem_limits(sk, 2) > alloc *
2146 sk_mem_pages(sk->sk_wmem_queued +
2147 atomic_read(&sk->sk_rmem_alloc) +
2148 sk->sk_forward_alloc))
2149 return 1;
2150 }
2151
2152 suppress_allocation:
2153
2154 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2155 sk_stream_moderate_sndbuf(sk);
2156
2157 /* Fail only if socket is _under_ its sndbuf.
2158 * In this case we cannot block, so we have to fail.
2159 */
2160 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2161 return 1;
2162 }
2163
2164 trace_sock_exceed_buf_limit(sk, prot, allocated);
2165
2166 /* Alas. Undo changes. */
2167 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2168
2169 sk_memory_allocated_sub(sk, amt);
2170
2171 return 0;
2172 }
2173 EXPORT_SYMBOL(__sk_mem_schedule);
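/*
 * A simplified sketch (hypothetical helper) of how a receive path combines
 * the page-granular accounting above with per-skb charging: schedule forward
 * allocation if needed, then let skb_set_owner_r() charge sk_rmem_alloc and
 * sk_forward_alloc. Real callers (e.g. sock_queue_rcv_skb) also check
 * sk_rcvbuf and run socket filters; that is omitted here.
 */
static inline int example_charge_rmem(struct sock *sk, struct sk_buff *skb)
{
	if (sk_has_account(sk) &&
	    skb->truesize > sk->sk_forward_alloc &&
	    !__sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV))
		return -ENOBUFS;	/* over the protocol's memory limits */

	skb_set_owner_r(skb, sk);	/* charges rmem_alloc + forward_alloc */
	return 0;
}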
2174
2175 /**
2176 * __sk_mem_reclaim - reclaim memory_allocated
2177 * @sk: socket
2178 */
2179 void __sk_mem_reclaim(struct sock *sk)
2180 {
2181 sk_memory_allocated_sub(sk,
2182 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
2183 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2184
2185 if (sk_under_memory_pressure(sk) &&
2186 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2187 sk_leave_memory_pressure(sk);
2188 }
2189 EXPORT_SYMBOL(__sk_mem_reclaim);
2190
2191
2192 /*
2193 * Set of default routines for initialising struct proto_ops when
2194 * the protocol does not support a particular function. In certain
2195 * cases where it makes no sense for a protocol to have a "do nothing"
2196 * function, some default processing is provided.
2197 */
2198
2199 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2200 {
2201 return -EOPNOTSUPP;
2202 }
2203 EXPORT_SYMBOL(sock_no_bind);
2204
2205 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2206 int len, int flags)
2207 {
2208 return -EOPNOTSUPP;
2209 }
2210 EXPORT_SYMBOL(sock_no_connect);
2211
2212 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2213 {
2214 return -EOPNOTSUPP;
2215 }
2216 EXPORT_SYMBOL(sock_no_socketpair);
2217
2218 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2219 {
2220 return -EOPNOTSUPP;
2221 }
2222 EXPORT_SYMBOL(sock_no_accept);
2223
2224 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2225 int *len, int peer)
2226 {
2227 return -EOPNOTSUPP;
2228 }
2229 EXPORT_SYMBOL(sock_no_getname);
2230
2231 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2232 {
2233 return 0;
2234 }
2235 EXPORT_SYMBOL(sock_no_poll);
2236
2237 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2238 {
2239 return -EOPNOTSUPP;
2240 }
2241 EXPORT_SYMBOL(sock_no_ioctl);
2242
2243 int sock_no_listen(struct socket *sock, int backlog)
2244 {
2245 return -EOPNOTSUPP;
2246 }
2247 EXPORT_SYMBOL(sock_no_listen);
2248
2249 int sock_no_shutdown(struct socket *sock, int how)
2250 {
2251 return -EOPNOTSUPP;
2252 }
2253 EXPORT_SYMBOL(sock_no_shutdown);
2254
2255 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2256 char __user *optval, unsigned int optlen)
2257 {
2258 return -EOPNOTSUPP;
2259 }
2260 EXPORT_SYMBOL(sock_no_setsockopt);
2261
2262 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2263 char __user *optval, int __user *optlen)
2264 {
2265 return -EOPNOTSUPP;
2266 }
2267 EXPORT_SYMBOL(sock_no_getsockopt);
2268
2269 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2270 size_t len)
2271 {
2272 return -EOPNOTSUPP;
2273 }
2274 EXPORT_SYMBOL(sock_no_sendmsg);
2275
2276 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2277 size_t len, int flags)
2278 {
2279 return -EOPNOTSUPP;
2280 }
2281 EXPORT_SYMBOL(sock_no_recvmsg);
2282
2283 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2284 {
2285 /* Mirror missing mmap method error code */
2286 return -ENODEV;
2287 }
2288 EXPORT_SYMBOL(sock_no_mmap);
2289
2290 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2291 {
2292 ssize_t res;
2293 struct msghdr msg = {.msg_flags = flags};
2294 struct kvec iov;
2295 char *kaddr = kmap(page);
2296 iov.iov_base = kaddr + offset;
2297 iov.iov_len = size;
2298 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2299 kunmap(page);
2300 return res;
2301 }
2302 EXPORT_SYMBOL(sock_no_sendpage);
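/*
 * An illustrative sketch (all example_* names and PF_UNSPEC below are
 * placeholders): a protocol that genuinely cannot support some operations
 * simply points the corresponding proto_ops slots at the sock_no_*() stubs
 * above, rather than providing its own "do nothing" handlers. The slots it
 * does implement (release, bind, sendmsg, recvmsg, ...) are omitted here.
 */
static const struct proto_ops example_dgram_ops __maybe_unused = {
	.family		= PF_UNSPEC,		/* placeholder family */
	.owner		= THIS_MODULE,
	.socketpair	= sock_no_socketpair,	/* no socketpair() */
	.accept		= sock_no_accept,	/* connectionless: no accept() */
	.listen		= sock_no_listen,	/* ... and no listen() */
	.mmap		= sock_no_mmap,		/* no mmap() support */
	.sendpage	= sock_no_sendpage,	/* fall back to sendmsg() */
};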
2303
2304 /*
2305 * Default Socket Callbacks
2306 */
2307
2308 static void sock_def_wakeup(struct sock *sk)
2309 {
2310 struct socket_wq *wq;
2311
2312 rcu_read_lock();
2313 wq = rcu_dereference(sk->sk_wq);
2314 if (wq_has_sleeper(wq))
2315 wake_up_interruptible_all(&wq->wait);
2316 rcu_read_unlock();
2317 }
2318
2319 static void sock_def_error_report(struct sock *sk)
2320 {
2321 struct socket_wq *wq;
2322
2323 rcu_read_lock();
2324 wq = rcu_dereference(sk->sk_wq);
2325 if (wq_has_sleeper(wq))
2326 wake_up_interruptible_poll(&wq->wait, POLLERR);
2327 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2328 rcu_read_unlock();
2329 }
2330
2331 static void sock_def_readable(struct sock *sk, int len)
2332 {
2333 struct socket_wq *wq;
2334
2335 rcu_read_lock();
2336 wq = rcu_dereference(sk->sk_wq);
2337 if (wq_has_sleeper(wq))
2338 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2339 POLLRDNORM | POLLRDBAND);
2340 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2341 rcu_read_unlock();
2342 }
2343
2344 static void sock_def_write_space(struct sock *sk)
2345 {
2346 struct socket_wq *wq;
2347
2348 rcu_read_lock();
2349
2350 /* Do not wake up a writer until he can make "significant"
2351 * progress. --DaveM
2352 */
2353 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2354 wq = rcu_dereference(sk->sk_wq);
2355 if (wq_has_sleeper(wq))
2356 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2357 POLLWRNORM | POLLWRBAND);
2358
2359 /* Should agree with poll, otherwise some programs break */
2360 if (sock_writeable(sk))
2361 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2362 }
2363
2364 rcu_read_unlock();
2365 }
2366
2367 static void sock_def_destruct(struct sock *sk)
2368 {
2369 kfree(sk->sk_protinfo);
2370 }
2371
2372 void sk_send_sigurg(struct sock *sk)
2373 {
2374 if (sk->sk_socket && sk->sk_socket->file)
2375 if (send_sigurg(&sk->sk_socket->file->f_owner))
2376 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2377 }
2378 EXPORT_SYMBOL(sk_send_sigurg);
2379
2380 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2381 unsigned long expires)
2382 {
2383 if (!mod_timer(timer, expires))
2384 sock_hold(sk);
2385 }
2386 EXPORT_SYMBOL(sk_reset_timer);
2387
2388 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2389 {
2390 if (del_timer(timer))
2391 __sock_put(sk);
2392 }
2393 EXPORT_SYMBOL(sk_stop_timer);
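/*
 * A minimal sketch (hypothetical handler) of the reference-counting contract
 * behind sk_reset_timer()/sk_stop_timer(): arming the timer takes a hold on
 * the socket, so the handler must sock_put() when it is done, and re-arming
 * from inside the handler takes a fresh hold.
 */
static inline void example_sk_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* owner is busy: try again shortly, taking a new hold */
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
	} else {
		/* ... protocol-specific timeout processing ... */
	}
	bh_unlock_sock(sk);
	sock_put(sk);	/* pairs with the hold taken when the timer was armed */
}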
2394
2395 void sock_init_data(struct socket *sock, struct sock *sk)
2396 {
2397 skb_queue_head_init(&sk->sk_receive_queue);
2398 skb_queue_head_init(&sk->sk_write_queue);
2399 skb_queue_head_init(&sk->sk_error_queue);
2400 #ifdef CONFIG_NET_DMA
2401 skb_queue_head_init(&sk->sk_async_wait_queue);
2402 #endif
2403
2404 sk->sk_send_head = NULL;
2405
2406 init_timer(&sk->sk_timer);
2407
2408 sk->sk_allocation = GFP_KERNEL;
2409 sk->sk_rcvbuf = sysctl_rmem_default;
2410 sk->sk_sndbuf = sysctl_wmem_default;
2411 sk->sk_state = TCP_CLOSE;
2412 sk_set_socket(sk, sock);
2413
2414 sock_set_flag(sk, SOCK_ZAPPED);
2415
2416 if (sock) {
2417 sk->sk_type = sock->type;
2418 sk->sk_wq = sock->wq;
2419 sock->sk = sk;
2420 } else
2421 sk->sk_wq = NULL;
2422
2423 spin_lock_init(&sk->sk_dst_lock);
2424 rwlock_init(&sk->sk_callback_lock);
2425 lockdep_set_class_and_name(&sk->sk_callback_lock,
2426 af_callback_keys + sk->sk_family,
2427 af_family_clock_key_strings[sk->sk_family]);
2428
2429 sk->sk_state_change = sock_def_wakeup;
2430 sk->sk_data_ready = sock_def_readable;
2431 sk->sk_write_space = sock_def_write_space;
2432 sk->sk_error_report = sock_def_error_report;
2433 sk->sk_destruct = sock_def_destruct;
2434
2435 sk->sk_frag.page = NULL;
2436 sk->sk_frag.offset = 0;
2437 sk->sk_peek_off = -1;
2438
2439 sk->sk_peer_pid = NULL;
2440 sk->sk_peer_cred = NULL;
2441 sk->sk_write_pending = 0;
2442 sk->sk_rcvlowat = 1;
2443 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2444 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2445
2446 sk->sk_stamp = ktime_set(-1L, 0);
2447
2448 sk->sk_pacing_rate = ~0U;
2449 /*
2450 * Before updating sk_refcnt, we must commit prior changes to memory
2451 * (Documentation/RCU/rculist_nulls.txt for details)
2452 */
2453 smp_wmb();
2454 atomic_set(&sk->sk_refcnt, 1);
2455 atomic_set(&sk->sk_drops, 0);
2456 }
2457 EXPORT_SYMBOL(sock_init_data);
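/*
 * A minimal sketch (hypothetical init helper, my_data_ready supplied by the
 * caller): protocols normally call sock_init_data() first and then override
 * whichever sock_def_*() callbacks or defaults they care about.
 */
static inline void example_proto_init_sock(struct socket *sock, struct sock *sk,
					   void (*my_data_ready)(struct sock *, int))
{
	sock_init_data(sock, sk);

	/* replace the default readable callback with a protocol-specific one */
	sk->sk_data_ready = my_data_ready;

	/* and tune a default the generic code just set, if desired */
	sk->sk_rcvtimeo = 30 * HZ;
}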
2458
2459 void lock_sock_nested(struct sock *sk, int subclass)
2460 {
2461 might_sleep();
2462 spin_lock_bh(&sk->sk_lock.slock);
2463 if (sk->sk_lock.owned)
2464 __lock_sock(sk);
2465 sk->sk_lock.owned = 1;
2466 spin_unlock(&sk->sk_lock.slock);
2467 /*
2468 * The sk_lock has mutex_lock() semantics here:
2469 */
2470 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2471 local_bh_enable();
2472 }
2473 EXPORT_SYMBOL(lock_sock_nested);
2474
2475 void release_sock(struct sock *sk)
2476 {
2477 /*
2478 * The sk_lock has mutex_unlock() semantics:
2479 */
2480 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2481
2482 spin_lock_bh(&sk->sk_lock.slock);
2483 if (sk->sk_backlog.tail)
2484 __release_sock(sk);
2485
2486 /* Warning: release_cb() might need to release sk ownership,
2487 * i.e. call sock_release_ownership(sk) before us.
2488 */
2489 if (sk->sk_prot->release_cb)
2490 sk->sk_prot->release_cb(sk);
2491
2492 sock_release_ownership(sk);
2493 if (waitqueue_active(&sk->sk_lock.wq))
2494 wake_up(&sk->sk_lock.wq);
2495 spin_unlock_bh(&sk->sk_lock.slock);
2496 }
2497 EXPORT_SYMBOL(release_sock);
2498
2499 /**
2500 * lock_sock_fast - fast version of lock_sock
2501 * @sk: socket
2502 *
2503 * This version should be used for very small sections, where the process won't block.
2504 * Returns false if the fast path is taken:
2505 *  sk_lock.slock locked, owned = 0, BH disabled
2506 * Returns true if the slow path is taken:
2507 *  sk_lock.slock unlocked, owned = 1, BH enabled
2508 */
2509 bool lock_sock_fast(struct sock *sk)
2510 {
2511 might_sleep();
2512 spin_lock_bh(&sk->sk_lock.slock);
2513
2514 if (!sk->sk_lock.owned)
2515 /*
2516 * Note : We must disable BH
2517 */
2518 return false;
2519
2520 __lock_sock(sk);
2521 sk->sk_lock.owned = 1;
2522 spin_unlock(&sk->sk_lock.slock);
2523 /*
2524 * The sk_lock has mutex_lock() semantics here:
2525 */
2526 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2527 local_bh_enable();
2528 return true;
2529 }
2530 EXPORT_SYMBOL(lock_sock_fast);
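/*
 * A minimal usage sketch (hypothetical helper): the boolean returned by
 * lock_sock_fast() must be fed back into unlock_sock_fast(), so the unlock
 * side matches whichever of the two locking modes was actually taken.
 */
static inline __u32 example_peek_rcvqueue_len(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	__u32 len = skb_queue_len(&sk->sk_receive_queue);

	unlock_sock_fast(sk, slow);
	return len;
}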
2531
2532 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2533 {
2534 struct timeval tv;
2535 if (!sock_flag(sk, SOCK_TIMESTAMP))
2536 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2537 tv = ktime_to_timeval(sk->sk_stamp);
2538 if (tv.tv_sec == -1)
2539 return -ENOENT;
2540 if (tv.tv_sec == 0) {
2541 sk->sk_stamp = ktime_get_real();
2542 tv = ktime_to_timeval(sk->sk_stamp);
2543 }
2544 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2545 }
2546 EXPORT_SYMBOL(sock_get_timestamp);
2547
2548 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2549 {
2550 struct timespec ts;
2551 if (!sock_flag(sk, SOCK_TIMESTAMP))
2552 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2553 ts = ktime_to_timespec(sk->sk_stamp);
2554 if (ts.tv_sec == -1)
2555 return -ENOENT;
2556 if (ts.tv_sec == 0) {
2557 sk->sk_stamp = ktime_get_real();
2558 ts = ktime_to_timespec(sk->sk_stamp);
2559 }
2560 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2561 }
2562 EXPORT_SYMBOL(sock_get_timestampns);
2563
2564 void sock_enable_timestamp(struct sock *sk, int flag)
2565 {
2566 if (!sock_flag(sk, flag)) {
2567 unsigned long previous_flags = sk->sk_flags;
2568
2569 sock_set_flag(sk, flag);
2570 /*
2571 * we just set one of the two flags which require net
2572 * time stamping, but time stamping might have been on
2573 * already because of the other one
2574 */
2575 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2576 net_enable_timestamp();
2577 }
2578 }
2579
2580 /*
2581 * Get a socket option on a socket.
2582 *
2583 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2584 * asynchronous errors should be reported by getsockopt. We assume
2585 * this means if you specify SO_ERROR (otherwise what's the point of it).
2586 */
2587 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2588 char __user *optval, int __user *optlen)
2589 {
2590 struct sock *sk = sock->sk;
2591
2592 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2593 }
2594 EXPORT_SYMBOL(sock_common_getsockopt);
2595
2596 #ifdef CONFIG_COMPAT
2597 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2598 char __user *optval, int __user *optlen)
2599 {
2600 struct sock *sk = sock->sk;
2601
2602 if (sk->sk_prot->compat_getsockopt != NULL)
2603 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2604 optval, optlen);
2605 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2606 }
2607 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2608 #endif
2609
2610 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2611 struct msghdr *msg, size_t size, int flags)
2612 {
2613 struct sock *sk = sock->sk;
2614 int addr_len = 0;
2615 int err;
2616
2617 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2618 flags & ~MSG_DONTWAIT, &addr_len);
2619 if (err >= 0)
2620 msg->msg_namelen = addr_len;
2621 return err;
2622 }
2623 EXPORT_SYMBOL(sock_common_recvmsg);
2624
2625 /*
2626 * Set socket options on an inet socket.
2627 */
2628 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2629 char __user *optval, unsigned int optlen)
2630 {
2631 struct sock *sk = sock->sk;
2632
2633 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2634 }
2635 EXPORT_SYMBOL(sock_common_setsockopt);
2636
2637 #ifdef CONFIG_COMPAT
2638 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2639 char __user *optval, unsigned int optlen)
2640 {
2641 struct sock *sk = sock->sk;
2642
2643 if (sk->sk_prot->compat_setsockopt != NULL)
2644 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2645 optval, optlen);
2646 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2647 }
2648 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2649 #endif
2650
2651 void sk_common_release(struct sock *sk)
2652 {
2653 if (sk->sk_prot->destroy)
2654 sk->sk_prot->destroy(sk);
2655
2656 /*
2657 * Observation: when sk_common_release() is called, processes have
2658 * no access to the socket any more, but the network stack still does.
2659 * Step one, detach it from networking:
2660 *
2661 * A. Remove from hash tables.
2662 */
2663
2664 sk->sk_prot->unhash(sk);
2665
2666 /*
2667 * At this point the socket cannot receive new packets, but it is possible
2668 * that some packets are still in flight because some CPU was running the
2669 * receiver and did a hash table lookup before we unhashed the socket. They
2670 * will reach the receive queue and be purged by the socket destructor.
2671 *
2672 * Also, we may still have packets pending on the receive queue and,
2673 * probably, our own packets waiting in device queues. sock_destroy will
2674 * drain the receive queue, but transmitted packets will delay socket
2675 * destruction until the last reference is released.
2676 */
2677
2678 sock_orphan(sk);
2679
2680 xfrm_sk_free_policy(sk);
2681
2682 sk_refcnt_debug_release(sk);
2683
2684 if (sk->sk_frag.page) {
2685 put_page(sk->sk_frag.page);
2686 sk->sk_frag.page = NULL;
2687 }
2688
2689 sock_put(sk);
2690 }
2691 EXPORT_SYMBOL(sk_common_release);
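/*
 * A minimal sketch (hypothetical close handler): simple datagram/raw style
 * protocols typically just drain whatever they queued themselves and then
 * let sk_common_release() handle destroy, unhash, orphan and the final put.
 */
static inline void example_proto_close(struct sock *sk, long timeout)
{
	lock_sock(sk);
	sk->sk_state = TCP_CLOSE;
	sk->sk_shutdown |= SHUTDOWN_MASK;
	skb_queue_purge(&sk->sk_receive_queue);
	release_sock(sk);

	sk_common_release(sk);
}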
2692
2693 #ifdef CONFIG_PROC_FS
2694 #define PROTO_INUSE_NR 64 /* should be enough for the first time */
2695 struct prot_inuse {
2696 int val[PROTO_INUSE_NR];
2697 };
2698
2699 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2700
2701 #ifdef CONFIG_NET_NS
2702 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2703 {
2704 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2705 }
2706 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2707
2708 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2709 {
2710 int cpu, idx = prot->inuse_idx;
2711 int res = 0;
2712
2713 for_each_possible_cpu(cpu)
2714 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2715
2716 return res >= 0 ? res : 0;
2717 }
2718 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2719
2720 static int __net_init sock_inuse_init_net(struct net *net)
2721 {
2722 net->core.inuse = alloc_percpu(struct prot_inuse);
2723 return net->core.inuse ? 0 : -ENOMEM;
2724 }
2725
2726 static void __net_exit sock_inuse_exit_net(struct net *net)
2727 {
2728 free_percpu(net->core.inuse);
2729 }
2730
2731 static struct pernet_operations net_inuse_ops = {
2732 .init = sock_inuse_init_net,
2733 .exit = sock_inuse_exit_net,
2734 };
2735
2736 static __init int net_inuse_init(void)
2737 {
2738 if (register_pernet_subsys(&net_inuse_ops))
2739 panic("Cannot initialize net inuse counters");
2740
2741 return 0;
2742 }
2743
2744 core_initcall(net_inuse_init);
2745 #else
2746 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2747
2748 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2749 {
2750 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2751 }
2752 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2753
2754 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2755 {
2756 int cpu, idx = prot->inuse_idx;
2757 int res = 0;
2758
2759 for_each_possible_cpu(cpu)
2760 res += per_cpu(prot_inuse, cpu).val[idx];
2761
2762 return res >= 0 ? res : 0;
2763 }
2764 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2765 #endif
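/*
 * A minimal sketch (hypothetical hash/unhash pair): protocols account a
 * socket in the per-protocol "inuse" counter when it becomes visible in
 * their lookup tables and subtract it again on unhash; this is what feeds
 * the "sockets" column of /proc/net/protocols. Callers normally do this
 * with BHs disabled, under the protocol's hash lock.
 */
static inline void example_proto_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup structures ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static inline void example_proto_unhash(struct sock *sk)
{
	/* ... remove sk from the lookup structures ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}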
2766
2767 static void assign_proto_idx(struct proto *prot)
2768 {
2769 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2770
2771 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2772 pr_err("PROTO_INUSE_NR exhausted\n");
2773 return;
2774 }
2775
2776 set_bit(prot->inuse_idx, proto_inuse_idx);
2777 }
2778
2779 static void release_proto_idx(struct proto *prot)
2780 {
2781 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2782 clear_bit(prot->inuse_idx, proto_inuse_idx);
2783 }
2784 #else
2785 static inline void assign_proto_idx(struct proto *prot)
2786 {
2787 }
2788
2789 static inline void release_proto_idx(struct proto *prot)
2790 {
2791 }
2792 #endif
2793
2794 int proto_register(struct proto *prot, int alloc_slab)
2795 {
2796 if (alloc_slab) {
2797 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2798 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2799 NULL);
2800
2801 if (prot->slab == NULL) {
2802 pr_crit("%s: Can't create sock SLAB cache!\n",
2803 prot->name);
2804 goto out;
2805 }
2806
2807 if (prot->rsk_prot != NULL) {
2808 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2809 if (prot->rsk_prot->slab_name == NULL)
2810 goto out_free_sock_slab;
2811
2812 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2813 prot->rsk_prot->obj_size, 0,
2814 SLAB_HWCACHE_ALIGN, NULL);
2815
2816 if (prot->rsk_prot->slab == NULL) {
2817 pr_crit("%s: Can't create request sock SLAB cache!\n",
2818 prot->name);
2819 goto out_free_request_sock_slab_name;
2820 }
2821 }
2822
2823 if (prot->twsk_prot != NULL) {
2824 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2825
2826 if (prot->twsk_prot->twsk_slab_name == NULL)
2827 goto out_free_request_sock_slab;
2828
2829 prot->twsk_prot->twsk_slab =
2830 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2831 prot->twsk_prot->twsk_obj_size,
2832 0,
2833 SLAB_HWCACHE_ALIGN |
2834 prot->slab_flags,
2835 NULL);
2836 if (prot->twsk_prot->twsk_slab == NULL)
2837 goto out_free_timewait_sock_slab_name;
2838 }
2839 }
2840
2841 mutex_lock(&proto_list_mutex);
2842 list_add(&prot->node, &proto_list);
2843 assign_proto_idx(prot);
2844 mutex_unlock(&proto_list_mutex);
2845 return 0;
2846
2847 out_free_timewait_sock_slab_name:
2848 kfree(prot->twsk_prot->twsk_slab_name);
2849 out_free_request_sock_slab:
2850 if (prot->rsk_prot && prot->rsk_prot->slab) {
2851 kmem_cache_destroy(prot->rsk_prot->slab);
2852 prot->rsk_prot->slab = NULL;
2853 }
2854 out_free_request_sock_slab_name:
2855 if (prot->rsk_prot)
2856 kfree(prot->rsk_prot->slab_name);
2857 out_free_sock_slab:
2858 kmem_cache_destroy(prot->slab);
2859 prot->slab = NULL;
2860 out:
2861 return -ENOBUFS;
2862 }
2863 EXPORT_SYMBOL(proto_register);
2864
2865 void proto_unregister(struct proto *prot)
2866 {
2867 mutex_lock(&proto_list_mutex);
2868 release_proto_idx(prot);
2869 list_del(&prot->node);
2870 mutex_unlock(&proto_list_mutex);
2871
2872 if (prot->slab != NULL) {
2873 kmem_cache_destroy(prot->slab);
2874 prot->slab = NULL;
2875 }
2876
2877 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2878 kmem_cache_destroy(prot->rsk_prot->slab);
2879 kfree(prot->rsk_prot->slab_name);
2880 prot->rsk_prot->slab = NULL;
2881 }
2882
2883 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2884 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2885 kfree(prot->twsk_prot->twsk_slab_name);
2886 prot->twsk_prot->twsk_slab = NULL;
2887 }
2888 }
2889 EXPORT_SYMBOL(proto_unregister);
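/*
 * A minimal sketch (hypothetical module hooks, "prot" supplied by the
 * caller): proto_register() is normally called from a protocol's module
 * init with alloc_slab = 1 so the core creates the sock slab, and is
 * balanced by proto_unregister() on the module exit path.
 */
static inline int example_register_protocol(struct proto *prot)
{
	int rc;

	rc = proto_register(prot, 1);	/* alloc_slab = 1: create sock slab */
	if (rc)
		return rc;		/* -ENOBUFS if slab creation failed */

	/* ... register the protocol family / packet handlers here ... */
	return 0;
}

static inline void example_unregister_protocol(struct proto *prot)
{
	/* ... unregister the family / handlers first ... */
	proto_unregister(prot);
}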
2890
2891 #ifdef CONFIG_PROC_FS
2892 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2893 __acquires(proto_list_mutex)
2894 {
2895 mutex_lock(&proto_list_mutex);
2896 return seq_list_start_head(&proto_list, *pos);
2897 }
2898
2899 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2900 {
2901 return seq_list_next(v, &proto_list, pos);
2902 }
2903
2904 static void proto_seq_stop(struct seq_file *seq, void *v)
2905 __releases(proto_list_mutex)
2906 {
2907 mutex_unlock(&proto_list_mutex);
2908 }
2909
2910 static char proto_method_implemented(const void *method)
2911 {
2912 return method == NULL ? 'n' : 'y';
2913 }
2914 static long sock_prot_memory_allocated(struct proto *proto)
2915 {
2916 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2917 }
2918
2919 static char *sock_prot_memory_pressure(struct proto *proto)
2920 {
2921 return proto->memory_pressure != NULL ?
2922 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2923 }
2924
2925 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2926 {
2927
2928 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2929 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2930 proto->name,
2931 proto->obj_size,
2932 sock_prot_inuse_get(seq_file_net(seq), proto),
2933 sock_prot_memory_allocated(proto),
2934 sock_prot_memory_pressure(proto),
2935 proto->max_header,
2936 proto->slab == NULL ? "no" : "yes",
2937 module_name(proto->owner),
2938 proto_method_implemented(proto->close),
2939 proto_method_implemented(proto->connect),
2940 proto_method_implemented(proto->disconnect),
2941 proto_method_implemented(proto->accept),
2942 proto_method_implemented(proto->ioctl),
2943 proto_method_implemented(proto->init),
2944 proto_method_implemented(proto->destroy),
2945 proto_method_implemented(proto->shutdown),
2946 proto_method_implemented(proto->setsockopt),
2947 proto_method_implemented(proto->getsockopt),
2948 proto_method_implemented(proto->sendmsg),
2949 proto_method_implemented(proto->recvmsg),
2950 proto_method_implemented(proto->sendpage),
2951 proto_method_implemented(proto->bind),
2952 proto_method_implemented(proto->backlog_rcv),
2953 proto_method_implemented(proto->hash),
2954 proto_method_implemented(proto->unhash),
2955 proto_method_implemented(proto->get_port),
2956 proto_method_implemented(proto->enter_memory_pressure));
2957 }
2958
2959 static int proto_seq_show(struct seq_file *seq, void *v)
2960 {
2961 if (v == &proto_list)
2962 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2963 "protocol",
2964 "size",
2965 "sockets",
2966 "memory",
2967 "press",
2968 "maxhdr",
2969 "slab",
2970 "module",
2971 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2972 else
2973 proto_seq_printf(seq, list_entry(v, struct proto, node));
2974 return 0;
2975 }
2976
2977 static const struct seq_operations proto_seq_ops = {
2978 .start = proto_seq_start,
2979 .next = proto_seq_next,
2980 .stop = proto_seq_stop,
2981 .show = proto_seq_show,
2982 };
2983
2984 static int proto_seq_open(struct inode *inode, struct file *file)
2985 {
2986 return seq_open_net(inode, file, &proto_seq_ops,
2987 sizeof(struct seq_net_private));
2988 }
2989
2990 static const struct file_operations proto_seq_fops = {
2991 .owner = THIS_MODULE,
2992 .open = proto_seq_open,
2993 .read = seq_read,
2994 .llseek = seq_lseek,
2995 .release = seq_release_net,
2996 };
2997
2998 static __net_init int proto_init_net(struct net *net)
2999 {
3000 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
3001 return -ENOMEM;
3002
3003 return 0;
3004 }
3005
3006 static __net_exit void proto_exit_net(struct net *net)
3007 {
3008 remove_proc_entry("protocols", net->proc_net);
3009 }
3010
3011
3012 static __net_initdata struct pernet_operations proto_net_ops = {
3013 .init = proto_init_net,
3014 .exit = proto_exit_net,
3015 };
3016
3017 static int __init proto_init(void)
3018 {
3019 return register_pernet_subsys(&proto_net_ops);
3020 }
3021
3022 subsys_initcall(proto_init);
3023
3024 #endif /* PROC_FS */