e3cb45411f345bf2d132ab77ce9aecc1d8ff991a
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / core / sock.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
8 *
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
14 *
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
35 * code. The ACK stuff can wait and needs major
36 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
82 *
83 * To Fix:
84 *
85 *
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
90 */
91
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/types.h>
97 #include <linux/socket.h>
98 #include <linux/in.h>
99 #include <linux/kernel.h>
100 #include <linux/module.h>
101 #include <linux/proc_fs.h>
102 #include <linux/seq_file.h>
103 #include <linux/sched.h>
104 #include <linux/timer.h>
105 #include <linux/string.h>
106 #include <linux/sockios.h>
107 #include <linux/net.h>
108 #include <linux/mm.h>
109 #include <linux/slab.h>
110 #include <linux/interrupt.h>
111 #include <linux/poll.h>
112 #include <linux/tcp.h>
113 #include <linux/init.h>
114 #include <linux/highmem.h>
115 #include <linux/user_namespace.h>
116 #include <linux/static_key.h>
117 #include <linux/memcontrol.h>
118 #include <linux/prefetch.h>
119
120 #include <asm/uaccess.h>
121
122 #include <linux/netdevice.h>
123 #include <net/protocol.h>
124 #include <linux/skbuff.h>
125 #include <net/net_namespace.h>
126 #include <net/request_sock.h>
127 #include <net/sock.h>
128 #include <linux/net_tstamp.h>
129 #include <net/xfrm.h>
130 #include <linux/ipsec.h>
131 #include <net/cls_cgroup.h>
132 #include <net/netprio_cgroup.h>
133
134 #include <linux/filter.h>
135
136 #include <trace/events/sock.h>
137
138 #ifdef CONFIG_INET
139 #include <net/tcp.h>
140 #endif
141
142 static DEFINE_MUTEX(proto_list_mutex);
143 static LIST_HEAD(proto_list);
144
145 /**
146 * sk_ns_capable - General socket capability test
147 * @sk: Socket to use a capability on or through
148 * @user_ns: The user namespace of the capability to use
149 * @cap: The capability to use
150 *
151 * Test to see if the opener of the socket had when the socket was
152 * created and the current process has the capability @cap in the user
153 * namespace @user_ns.
154 */
155 bool sk_ns_capable(const struct sock *sk,
156 struct user_namespace *user_ns, int cap)
157 {
158 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
159 ns_capable(user_ns, cap);
160 }
161 EXPORT_SYMBOL(sk_ns_capable);
162
163 /**
164 * sk_capable - Socket global capability test
165 * @sk: Socket to use a capability on or through
166 * @cap: The global capbility to use
167 *
168 * Test to see if the opener of the socket had when the socket was
169 * created and the current process has the capability @cap in all user
170 * namespaces.
171 */
172 bool sk_capable(const struct sock *sk, int cap)
173 {
174 return sk_ns_capable(sk, &init_user_ns, cap);
175 }
176 EXPORT_SYMBOL(sk_capable);
177
178 /**
179 * sk_net_capable - Network namespace socket capability test
180 * @sk: Socket to use a capability on or through
181 * @cap: The capability to use
182 *
183 * Test to see if the opener of the socket had when the socke was created
184 * and the current process has the capability @cap over the network namespace
185 * the socket is a member of.
186 */
187 bool sk_net_capable(const struct sock *sk, int cap)
188 {
189 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
190 }
191 EXPORT_SYMBOL(sk_net_capable);
192
193
194 #ifdef CONFIG_MEMCG_KMEM
195 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
196 {
197 struct proto *proto;
198 int ret = 0;
199
200 mutex_lock(&proto_list_mutex);
201 list_for_each_entry(proto, &proto_list, node) {
202 if (proto->init_cgroup) {
203 ret = proto->init_cgroup(memcg, ss);
204 if (ret)
205 goto out;
206 }
207 }
208
209 mutex_unlock(&proto_list_mutex);
210 return ret;
211 out:
212 list_for_each_entry_continue_reverse(proto, &proto_list, node)
213 if (proto->destroy_cgroup)
214 proto->destroy_cgroup(memcg);
215 mutex_unlock(&proto_list_mutex);
216 return ret;
217 }
218
219 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
220 {
221 struct proto *proto;
222
223 mutex_lock(&proto_list_mutex);
224 list_for_each_entry_reverse(proto, &proto_list, node)
225 if (proto->destroy_cgroup)
226 proto->destroy_cgroup(memcg);
227 mutex_unlock(&proto_list_mutex);
228 }
229 #endif
230
231 /*
232 * Each address family might have different locking rules, so we have
233 * one slock key per address family:
234 */
235 static struct lock_class_key af_family_keys[AF_MAX];
236 static struct lock_class_key af_family_slock_keys[AF_MAX];
237
238 #if defined(CONFIG_MEMCG_KMEM)
239 struct static_key memcg_socket_limit_enabled;
240 EXPORT_SYMBOL(memcg_socket_limit_enabled);
241 #endif
242
243 /*
244 * Make lock validator output more readable. (we pre-construct these
245 * strings build-time, so that runtime initialization of socket
246 * locks is fast):
247 */
248 static const char *const af_family_key_strings[AF_MAX+1] = {
249 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
250 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
251 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
252 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
253 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
254 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
255 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
256 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
257 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
258 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
259 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
260 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
261 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
262 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
263 };
264 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
265 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
266 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
267 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
268 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
269 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
270 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
271 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
272 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
273 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
274 "slock-27" , "slock-28" , "slock-AF_CAN" ,
275 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
276 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
277 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
278 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
279 };
280 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
281 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
282 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
283 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
284 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
285 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
286 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
287 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
288 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
289 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
290 "clock-27" , "clock-28" , "clock-AF_CAN" ,
291 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
292 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
293 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
294 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
295 };
296
297 /*
298 * sk_callback_lock locking rules are per-address-family,
299 * so split the lock classes by using a per-AF key:
300 */
301 static struct lock_class_key af_callback_keys[AF_MAX];
302
303 /* Take into consideration the size of the struct sk_buff overhead in the
304 * determination of these values, since that is non-constant across
305 * platforms. This makes socket queueing behavior and performance
306 * not depend upon such differences.
307 */
308 #define _SK_MEM_PACKETS 256
309 #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
310 #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
311 #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
312
313 /* Run time adjustable parameters. */
314 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
315 EXPORT_SYMBOL(sysctl_wmem_max);
316 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
317 EXPORT_SYMBOL(sysctl_rmem_max);
318 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
319 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
320
321 /* Maximal space eaten by iovec or ancillary data plus some space */
322 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
323 EXPORT_SYMBOL(sysctl_optmem_max);
324
325 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
326 EXPORT_SYMBOL_GPL(memalloc_socks);
327
328 /**
329 * sk_set_memalloc - sets %SOCK_MEMALLOC
330 * @sk: socket to set it on
331 *
332 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
333 * It's the responsibility of the admin to adjust min_free_kbytes
334 * to meet the requirements
335 */
336 void sk_set_memalloc(struct sock *sk)
337 {
338 sock_set_flag(sk, SOCK_MEMALLOC);
339 sk->sk_allocation |= __GFP_MEMALLOC;
340 static_key_slow_inc(&memalloc_socks);
341 }
342 EXPORT_SYMBOL_GPL(sk_set_memalloc);
343
344 void sk_clear_memalloc(struct sock *sk)
345 {
346 sock_reset_flag(sk, SOCK_MEMALLOC);
347 sk->sk_allocation &= ~__GFP_MEMALLOC;
348 static_key_slow_dec(&memalloc_socks);
349
350 /*
351 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
352 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
353 * it has rmem allocations there is a risk that the user of the
354 * socket cannot make forward progress due to exceeding the rmem
355 * limits. By rights, sk_clear_memalloc() should only be called
356 * on sockets being torn down but warn and reset the accounting if
357 * that assumption breaks.
358 */
359 if (WARN_ON(sk->sk_forward_alloc))
360 sk_mem_reclaim(sk);
361 }
362 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
363
364 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
365 {
366 int ret;
367 unsigned long pflags = current->flags;
368
369 /* these should have been dropped before queueing */
370 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
371
372 current->flags |= PF_MEMALLOC;
373 ret = sk->sk_backlog_rcv(sk, skb);
374 tsk_restore_flags(current, pflags, PF_MEMALLOC);
375
376 return ret;
377 }
378 EXPORT_SYMBOL(__sk_backlog_rcv);
379
380 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
381 {
382 struct timeval tv;
383
384 if (optlen < sizeof(tv))
385 return -EINVAL;
386 if (copy_from_user(&tv, optval, sizeof(tv)))
387 return -EFAULT;
388 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
389 return -EDOM;
390
391 if (tv.tv_sec < 0) {
392 static int warned __read_mostly;
393
394 *timeo_p = 0;
395 if (warned < 10 && net_ratelimit()) {
396 warned++;
397 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
398 __func__, current->comm, task_pid_nr(current));
399 }
400 return 0;
401 }
402 *timeo_p = MAX_SCHEDULE_TIMEOUT;
403 if (tv.tv_sec == 0 && tv.tv_usec == 0)
404 return 0;
405 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
406 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
407 return 0;
408 }
409
410 static void sock_warn_obsolete_bsdism(const char *name)
411 {
412 static int warned;
413 static char warncomm[TASK_COMM_LEN];
414 if (strcmp(warncomm, current->comm) && warned < 5) {
415 strcpy(warncomm, current->comm);
416 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
417 warncomm, name);
418 warned++;
419 }
420 }
421
422 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
423 {
424 if (sk->sk_flags & flags) {
425 sk->sk_flags &= ~flags;
426 if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
427 net_disable_timestamp();
428 }
429 }
430
431
432 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
433 {
434 int err;
435 int skb_len;
436 unsigned long flags;
437 struct sk_buff_head *list = &sk->sk_receive_queue;
438
439 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
440 atomic_inc(&sk->sk_drops);
441 trace_sock_rcvqueue_full(sk, skb);
442 return -ENOMEM;
443 }
444
445 err = sk_filter(sk, skb);
446 if (err)
447 return err;
448
449 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
450 atomic_inc(&sk->sk_drops);
451 return -ENOBUFS;
452 }
453
454 skb->dev = NULL;
455 skb_set_owner_r(skb, sk);
456
457 /* Cache the SKB length before we tack it onto the receive
458 * queue. Once it is added it no longer belongs to us and
459 * may be freed by other threads of control pulling packets
460 * from the queue.
461 */
462 skb_len = skb->len;
463
464 /* we escape from rcu protected region, make sure we dont leak
465 * a norefcounted dst
466 */
467 skb_dst_force(skb);
468
469 spin_lock_irqsave(&list->lock, flags);
470 skb->dropcount = atomic_read(&sk->sk_drops);
471 __skb_queue_tail(list, skb);
472 spin_unlock_irqrestore(&list->lock, flags);
473
474 if (!sock_flag(sk, SOCK_DEAD))
475 sk->sk_data_ready(sk, skb_len);
476 return 0;
477 }
478 EXPORT_SYMBOL(sock_queue_rcv_skb);
479
480 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
481 {
482 int rc = NET_RX_SUCCESS;
483
484 if (sk_filter(sk, skb))
485 goto discard_and_relse;
486
487 skb->dev = NULL;
488
489 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
490 atomic_inc(&sk->sk_drops);
491 goto discard_and_relse;
492 }
493 if (nested)
494 bh_lock_sock_nested(sk);
495 else
496 bh_lock_sock(sk);
497 if (!sock_owned_by_user(sk)) {
498 /*
499 * trylock + unlock semantics:
500 */
501 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
502
503 rc = sk_backlog_rcv(sk, skb);
504
505 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
506 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
507 bh_unlock_sock(sk);
508 atomic_inc(&sk->sk_drops);
509 goto discard_and_relse;
510 }
511
512 bh_unlock_sock(sk);
513 out:
514 sock_put(sk);
515 return rc;
516 discard_and_relse:
517 kfree_skb(skb);
518 goto out;
519 }
520 EXPORT_SYMBOL(sk_receive_skb);
521
522 void sk_reset_txq(struct sock *sk)
523 {
524 sk_tx_queue_clear(sk);
525 }
526 EXPORT_SYMBOL(sk_reset_txq);
527
528 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
529 {
530 struct dst_entry *dst = __sk_dst_get(sk);
531
532 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
533 sk_tx_queue_clear(sk);
534 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
535 dst_release(dst);
536 return NULL;
537 }
538
539 return dst;
540 }
541 EXPORT_SYMBOL(__sk_dst_check);
542
543 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
544 {
545 struct dst_entry *dst = sk_dst_get(sk);
546
547 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
548 sk_dst_reset(sk);
549 dst_release(dst);
550 return NULL;
551 }
552
553 return dst;
554 }
555 EXPORT_SYMBOL(sk_dst_check);
556
557 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
558 int optlen)
559 {
560 int ret = -ENOPROTOOPT;
561 #ifdef CONFIG_NETDEVICES
562 struct net *net = sock_net(sk);
563 char devname[IFNAMSIZ];
564 int index;
565
566 /* Sorry... */
567 ret = -EPERM;
568 if (!ns_capable(net->user_ns, CAP_NET_RAW))
569 goto out;
570
571 ret = -EINVAL;
572 if (optlen < 0)
573 goto out;
574
575 /* Bind this socket to a particular device like "eth0",
576 * as specified in the passed interface name. If the
577 * name is "" or the option length is zero the socket
578 * is not bound.
579 */
580 if (optlen > IFNAMSIZ - 1)
581 optlen = IFNAMSIZ - 1;
582 memset(devname, 0, sizeof(devname));
583
584 ret = -EFAULT;
585 if (copy_from_user(devname, optval, optlen))
586 goto out;
587
588 index = 0;
589 if (devname[0] != '\0') {
590 struct net_device *dev;
591
592 rcu_read_lock();
593 dev = dev_get_by_name_rcu(net, devname);
594 if (dev)
595 index = dev->ifindex;
596 rcu_read_unlock();
597 ret = -ENODEV;
598 if (!dev)
599 goto out;
600 }
601
602 lock_sock(sk);
603 sk->sk_bound_dev_if = index;
604 sk_dst_reset(sk);
605 release_sock(sk);
606
607 ret = 0;
608
609 out:
610 #endif
611
612 return ret;
613 }
614
615 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
616 int __user *optlen, int len)
617 {
618 int ret = -ENOPROTOOPT;
619 #ifdef CONFIG_NETDEVICES
620 struct net *net = sock_net(sk);
621 char devname[IFNAMSIZ];
622
623 if (sk->sk_bound_dev_if == 0) {
624 len = 0;
625 goto zero;
626 }
627
628 ret = -EINVAL;
629 if (len < IFNAMSIZ)
630 goto out;
631
632 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
633 if (ret)
634 goto out;
635
636 len = strlen(devname) + 1;
637
638 ret = -EFAULT;
639 if (copy_to_user(optval, devname, len))
640 goto out;
641
642 zero:
643 ret = -EFAULT;
644 if (put_user(len, optlen))
645 goto out;
646
647 ret = 0;
648
649 out:
650 #endif
651
652 return ret;
653 }
654
655 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
656 {
657 if (valbool)
658 sock_set_flag(sk, bit);
659 else
660 sock_reset_flag(sk, bit);
661 }
662
663 /*
664 * This is meant for all protocols to use and covers goings on
665 * at the socket level. Everything here is generic.
666 */
667
668 int sock_setsockopt(struct socket *sock, int level, int optname,
669 char __user *optval, unsigned int optlen)
670 {
671 struct sock *sk = sock->sk;
672 int val;
673 int valbool;
674 struct linger ling;
675 int ret = 0;
676
677 /*
678 * Options without arguments
679 */
680
681 if (optname == SO_BINDTODEVICE)
682 return sock_setbindtodevice(sk, optval, optlen);
683
684 if (optlen < sizeof(int))
685 return -EINVAL;
686
687 if (get_user(val, (int __user *)optval))
688 return -EFAULT;
689
690 valbool = val ? 1 : 0;
691
692 lock_sock(sk);
693
694 switch (optname) {
695 case SO_DEBUG:
696 if (val && !capable(CAP_NET_ADMIN))
697 ret = -EACCES;
698 else
699 sock_valbool_flag(sk, SOCK_DBG, valbool);
700 break;
701 case SO_REUSEADDR:
702 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
703 break;
704 case SO_REUSEPORT:
705 sk->sk_reuseport = valbool;
706 break;
707 case SO_TYPE:
708 case SO_PROTOCOL:
709 case SO_DOMAIN:
710 case SO_ERROR:
711 ret = -ENOPROTOOPT;
712 break;
713 case SO_DONTROUTE:
714 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
715 break;
716 case SO_BROADCAST:
717 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
718 break;
719 case SO_SNDBUF:
720 /* Don't error on this BSD doesn't and if you think
721 * about it this is right. Otherwise apps have to
722 * play 'guess the biggest size' games. RCVBUF/SNDBUF
723 * are treated in BSD as hints
724 */
725 val = min_t(u32, val, sysctl_wmem_max);
726 set_sndbuf:
727 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
728 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
729 /* Wake up sending tasks if we upped the value. */
730 sk->sk_write_space(sk);
731 break;
732
733 case SO_SNDBUFFORCE:
734 if (!capable(CAP_NET_ADMIN)) {
735 ret = -EPERM;
736 break;
737 }
738 goto set_sndbuf;
739
740 case SO_RCVBUF:
741 /* Don't error on this BSD doesn't and if you think
742 * about it this is right. Otherwise apps have to
743 * play 'guess the biggest size' games. RCVBUF/SNDBUF
744 * are treated in BSD as hints
745 */
746 val = min_t(u32, val, sysctl_rmem_max);
747 set_rcvbuf:
748 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
749 /*
750 * We double it on the way in to account for
751 * "struct sk_buff" etc. overhead. Applications
752 * assume that the SO_RCVBUF setting they make will
753 * allow that much actual data to be received on that
754 * socket.
755 *
756 * Applications are unaware that "struct sk_buff" and
757 * other overheads allocate from the receive buffer
758 * during socket buffer allocation.
759 *
760 * And after considering the possible alternatives,
761 * returning the value we actually used in getsockopt
762 * is the most desirable behavior.
763 */
764 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
765 break;
766
767 case SO_RCVBUFFORCE:
768 if (!capable(CAP_NET_ADMIN)) {
769 ret = -EPERM;
770 break;
771 }
772 goto set_rcvbuf;
773
774 case SO_KEEPALIVE:
775 #ifdef CONFIG_INET
776 if (sk->sk_protocol == IPPROTO_TCP &&
777 sk->sk_type == SOCK_STREAM)
778 tcp_set_keepalive(sk, valbool);
779 #endif
780 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
781 break;
782
783 case SO_OOBINLINE:
784 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
785 break;
786
787 case SO_NO_CHECK:
788 sk->sk_no_check = valbool;
789 break;
790
791 case SO_PRIORITY:
792 if ((val >= 0 && val <= 6) ||
793 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
794 sk->sk_priority = val;
795 else
796 ret = -EPERM;
797 break;
798
799 case SO_LINGER:
800 if (optlen < sizeof(ling)) {
801 ret = -EINVAL; /* 1003.1g */
802 break;
803 }
804 if (copy_from_user(&ling, optval, sizeof(ling))) {
805 ret = -EFAULT;
806 break;
807 }
808 if (!ling.l_onoff)
809 sock_reset_flag(sk, SOCK_LINGER);
810 else {
811 #if (BITS_PER_LONG == 32)
812 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
813 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
814 else
815 #endif
816 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
817 sock_set_flag(sk, SOCK_LINGER);
818 }
819 break;
820
821 case SO_BSDCOMPAT:
822 sock_warn_obsolete_bsdism("setsockopt");
823 break;
824
825 case SO_PASSCRED:
826 if (valbool)
827 set_bit(SOCK_PASSCRED, &sock->flags);
828 else
829 clear_bit(SOCK_PASSCRED, &sock->flags);
830 break;
831
832 case SO_TIMESTAMP:
833 case SO_TIMESTAMPNS:
834 if (valbool) {
835 if (optname == SO_TIMESTAMP)
836 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
837 else
838 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
839 sock_set_flag(sk, SOCK_RCVTSTAMP);
840 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
841 } else {
842 sock_reset_flag(sk, SOCK_RCVTSTAMP);
843 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
844 }
845 break;
846
847 case SO_TIMESTAMPING:
848 if (val & ~SOF_TIMESTAMPING_MASK) {
849 ret = -EINVAL;
850 break;
851 }
852 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
853 val & SOF_TIMESTAMPING_TX_HARDWARE);
854 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
855 val & SOF_TIMESTAMPING_TX_SOFTWARE);
856 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
857 val & SOF_TIMESTAMPING_RX_HARDWARE);
858 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
859 sock_enable_timestamp(sk,
860 SOCK_TIMESTAMPING_RX_SOFTWARE);
861 else
862 sock_disable_timestamp(sk,
863 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
864 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
865 val & SOF_TIMESTAMPING_SOFTWARE);
866 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
867 val & SOF_TIMESTAMPING_SYS_HARDWARE);
868 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
869 val & SOF_TIMESTAMPING_RAW_HARDWARE);
870 break;
871
872 case SO_RCVLOWAT:
873 if (val < 0)
874 val = INT_MAX;
875 sk->sk_rcvlowat = val ? : 1;
876 break;
877
878 case SO_RCVTIMEO:
879 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
880 break;
881
882 case SO_SNDTIMEO:
883 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
884 break;
885
886 case SO_ATTACH_FILTER:
887 ret = -EINVAL;
888 if (optlen == sizeof(struct sock_fprog)) {
889 struct sock_fprog fprog;
890
891 ret = -EFAULT;
892 if (copy_from_user(&fprog, optval, sizeof(fprog)))
893 break;
894
895 ret = sk_attach_filter(&fprog, sk);
896 }
897 break;
898
899 case SO_DETACH_FILTER:
900 ret = sk_detach_filter(sk);
901 break;
902
903 case SO_LOCK_FILTER:
904 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
905 ret = -EPERM;
906 else
907 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
908 break;
909
910 case SO_PASSSEC:
911 if (valbool)
912 set_bit(SOCK_PASSSEC, &sock->flags);
913 else
914 clear_bit(SOCK_PASSSEC, &sock->flags);
915 break;
916 case SO_MARK:
917 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
918 ret = -EPERM;
919 else
920 sk->sk_mark = val;
921 break;
922
923 /* We implement the SO_SNDLOWAT etc to
924 not be settable (1003.1g 5.3) */
925 case SO_RXQ_OVFL:
926 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
927 break;
928
929 case SO_WIFI_STATUS:
930 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
931 break;
932
933 case SO_PEEK_OFF:
934 if (sock->ops->set_peek_off)
935 ret = sock->ops->set_peek_off(sk, val);
936 else
937 ret = -EOPNOTSUPP;
938 break;
939
940 case SO_NOFCS:
941 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
942 break;
943
944 case SO_SELECT_ERR_QUEUE:
945 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
946 break;
947
948 default:
949 ret = -ENOPROTOOPT;
950 break;
951 }
952 release_sock(sk);
953 return ret;
954 }
955 EXPORT_SYMBOL(sock_setsockopt);
956
957
958 void cred_to_ucred(struct pid *pid, const struct cred *cred,
959 struct ucred *ucred)
960 {
961 ucred->pid = pid_vnr(pid);
962 ucred->uid = ucred->gid = -1;
963 if (cred) {
964 struct user_namespace *current_ns = current_user_ns();
965
966 ucred->uid = from_kuid_munged(current_ns, cred->euid);
967 ucred->gid = from_kgid_munged(current_ns, cred->egid);
968 }
969 }
970 EXPORT_SYMBOL_GPL(cred_to_ucred);
971
972 int sock_getsockopt(struct socket *sock, int level, int optname,
973 char __user *optval, int __user *optlen)
974 {
975 struct sock *sk = sock->sk;
976
977 union {
978 int val;
979 struct linger ling;
980 struct timeval tm;
981 } v;
982
983 int lv = sizeof(int);
984 int len;
985
986 if (get_user(len, optlen))
987 return -EFAULT;
988 if (len < 0)
989 return -EINVAL;
990
991 memset(&v, 0, sizeof(v));
992
993 switch (optname) {
994 case SO_DEBUG:
995 v.val = sock_flag(sk, SOCK_DBG);
996 break;
997
998 case SO_DONTROUTE:
999 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1000 break;
1001
1002 case SO_BROADCAST:
1003 v.val = sock_flag(sk, SOCK_BROADCAST);
1004 break;
1005
1006 case SO_SNDBUF:
1007 v.val = sk->sk_sndbuf;
1008 break;
1009
1010 case SO_RCVBUF:
1011 v.val = sk->sk_rcvbuf;
1012 break;
1013
1014 case SO_REUSEADDR:
1015 v.val = sk->sk_reuse;
1016 break;
1017
1018 case SO_REUSEPORT:
1019 v.val = sk->sk_reuseport;
1020 break;
1021
1022 case SO_KEEPALIVE:
1023 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1024 break;
1025
1026 case SO_TYPE:
1027 v.val = sk->sk_type;
1028 break;
1029
1030 case SO_PROTOCOL:
1031 v.val = sk->sk_protocol;
1032 break;
1033
1034 case SO_DOMAIN:
1035 v.val = sk->sk_family;
1036 break;
1037
1038 case SO_ERROR:
1039 v.val = -sock_error(sk);
1040 if (v.val == 0)
1041 v.val = xchg(&sk->sk_err_soft, 0);
1042 break;
1043
1044 case SO_OOBINLINE:
1045 v.val = sock_flag(sk, SOCK_URGINLINE);
1046 break;
1047
1048 case SO_NO_CHECK:
1049 v.val = sk->sk_no_check;
1050 break;
1051
1052 case SO_PRIORITY:
1053 v.val = sk->sk_priority;
1054 break;
1055
1056 case SO_LINGER:
1057 lv = sizeof(v.ling);
1058 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1059 v.ling.l_linger = sk->sk_lingertime / HZ;
1060 break;
1061
1062 case SO_BSDCOMPAT:
1063 sock_warn_obsolete_bsdism("getsockopt");
1064 break;
1065
1066 case SO_TIMESTAMP:
1067 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1068 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1069 break;
1070
1071 case SO_TIMESTAMPNS:
1072 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1073 break;
1074
1075 case SO_TIMESTAMPING:
1076 v.val = 0;
1077 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
1078 v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
1079 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
1080 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
1081 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
1082 v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
1083 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1084 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
1085 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
1086 v.val |= SOF_TIMESTAMPING_SOFTWARE;
1087 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
1088 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
1089 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
1090 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
1091 break;
1092
1093 case SO_RCVTIMEO:
1094 lv = sizeof(struct timeval);
1095 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1096 v.tm.tv_sec = 0;
1097 v.tm.tv_usec = 0;
1098 } else {
1099 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1100 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1101 }
1102 break;
1103
1104 case SO_SNDTIMEO:
1105 lv = sizeof(struct timeval);
1106 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1107 v.tm.tv_sec = 0;
1108 v.tm.tv_usec = 0;
1109 } else {
1110 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1111 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1112 }
1113 break;
1114
1115 case SO_RCVLOWAT:
1116 v.val = sk->sk_rcvlowat;
1117 break;
1118
1119 case SO_SNDLOWAT:
1120 v.val = 1;
1121 break;
1122
1123 case SO_PASSCRED:
1124 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1125 break;
1126
1127 case SO_PEERCRED:
1128 {
1129 struct ucred peercred;
1130 if (len > sizeof(peercred))
1131 len = sizeof(peercred);
1132 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1133 if (copy_to_user(optval, &peercred, len))
1134 return -EFAULT;
1135 goto lenout;
1136 }
1137
1138 case SO_PEERNAME:
1139 {
1140 char address[128];
1141
1142 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1143 return -ENOTCONN;
1144 if (lv < len)
1145 return -EINVAL;
1146 if (copy_to_user(optval, address, len))
1147 return -EFAULT;
1148 goto lenout;
1149 }
1150
1151 /* Dubious BSD thing... Probably nobody even uses it, but
1152 * the UNIX standard wants it for whatever reason... -DaveM
1153 */
1154 case SO_ACCEPTCONN:
1155 v.val = sk->sk_state == TCP_LISTEN;
1156 break;
1157
1158 case SO_PASSSEC:
1159 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1160 break;
1161
1162 case SO_PEERSEC:
1163 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1164
1165 case SO_MARK:
1166 v.val = sk->sk_mark;
1167 break;
1168
1169 case SO_RXQ_OVFL:
1170 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1171 break;
1172
1173 case SO_WIFI_STATUS:
1174 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1175 break;
1176
1177 case SO_PEEK_OFF:
1178 if (!sock->ops->set_peek_off)
1179 return -EOPNOTSUPP;
1180
1181 v.val = sk->sk_peek_off;
1182 break;
1183 case SO_NOFCS:
1184 v.val = sock_flag(sk, SOCK_NOFCS);
1185 break;
1186
1187 case SO_BINDTODEVICE:
1188 return sock_getbindtodevice(sk, optval, optlen, len);
1189
1190 case SO_GET_FILTER:
1191 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1192 if (len < 0)
1193 return len;
1194
1195 goto lenout;
1196
1197 case SO_LOCK_FILTER:
1198 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1199 break;
1200
1201 case SO_SELECT_ERR_QUEUE:
1202 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1203 break;
1204
1205 default:
1206 return -ENOPROTOOPT;
1207 }
1208
1209 if (len > lv)
1210 len = lv;
1211 if (copy_to_user(optval, &v, len))
1212 return -EFAULT;
1213 lenout:
1214 if (put_user(len, optlen))
1215 return -EFAULT;
1216 return 0;
1217 }
1218
1219 /*
1220 * Initialize an sk_lock.
1221 *
1222 * (We also register the sk_lock with the lock validator.)
1223 */
1224 static inline void sock_lock_init(struct sock *sk)
1225 {
1226 sock_lock_init_class_and_name(sk,
1227 af_family_slock_key_strings[sk->sk_family],
1228 af_family_slock_keys + sk->sk_family,
1229 af_family_key_strings[sk->sk_family],
1230 af_family_keys + sk->sk_family);
1231 }
1232
1233 /*
1234 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1235 * even temporarly, because of RCU lookups. sk_node should also be left as is.
1236 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1237 */
1238 static void sock_copy(struct sock *nsk, const struct sock *osk)
1239 {
1240 #ifdef CONFIG_SECURITY_NETWORK
1241 void *sptr = nsk->sk_security;
1242 #endif
1243 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1244
1245 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1246 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1247
1248 #ifdef CONFIG_SECURITY_NETWORK
1249 nsk->sk_security = sptr;
1250 security_sk_clone(osk, nsk);
1251 #endif
1252 }
1253
1254 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1255 {
1256 unsigned long nulls1, nulls2;
1257
1258 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1259 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1260 if (nulls1 > nulls2)
1261 swap(nulls1, nulls2);
1262
1263 if (nulls1 != 0)
1264 memset((char *)sk, 0, nulls1);
1265 memset((char *)sk + nulls1 + sizeof(void *), 0,
1266 nulls2 - nulls1 - sizeof(void *));
1267 memset((char *)sk + nulls2 + sizeof(void *), 0,
1268 size - nulls2 - sizeof(void *));
1269 }
1270 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1271
1272 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1273 int family)
1274 {
1275 struct sock *sk;
1276 struct kmem_cache *slab;
1277
1278 slab = prot->slab;
1279 if (slab != NULL) {
1280 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1281 if (!sk)
1282 return sk;
1283 if (priority & __GFP_ZERO) {
1284 if (prot->clear_sk)
1285 prot->clear_sk(sk, prot->obj_size);
1286 else
1287 sk_prot_clear_nulls(sk, prot->obj_size);
1288 }
1289 } else
1290 sk = kmalloc(prot->obj_size, priority);
1291
1292 if (sk != NULL) {
1293 kmemcheck_annotate_bitfield(sk, flags);
1294
1295 if (security_sk_alloc(sk, family, priority))
1296 goto out_free;
1297
1298 if (!try_module_get(prot->owner))
1299 goto out_free_sec;
1300 sk_tx_queue_clear(sk);
1301 }
1302
1303 return sk;
1304
1305 out_free_sec:
1306 security_sk_free(sk);
1307 out_free:
1308 if (slab != NULL)
1309 kmem_cache_free(slab, sk);
1310 else
1311 kfree(sk);
1312 return NULL;
1313 }
1314
1315 static void sk_prot_free(struct proto *prot, struct sock *sk)
1316 {
1317 struct kmem_cache *slab;
1318 struct module *owner;
1319
1320 owner = prot->owner;
1321 slab = prot->slab;
1322
1323 security_sk_free(sk);
1324 if (slab != NULL)
1325 kmem_cache_free(slab, sk);
1326 else
1327 kfree(sk);
1328 module_put(owner);
1329 }
1330
1331 #if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
1332 void sock_update_classid(struct sock *sk)
1333 {
1334 u32 classid;
1335
1336 classid = task_cls_classid(current);
1337 if (classid != sk->sk_classid)
1338 sk->sk_classid = classid;
1339 }
1340 EXPORT_SYMBOL(sock_update_classid);
1341 #endif
1342
1343 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1344 void sock_update_netprioidx(struct sock *sk)
1345 {
1346 if (in_interrupt())
1347 return;
1348
1349 sk->sk_cgrp_prioidx = task_netprioidx(current);
1350 }
1351 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1352 #endif
1353
1354 /**
1355 * sk_alloc - All socket objects are allocated here
1356 * @net: the applicable net namespace
1357 * @family: protocol family
1358 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1359 * @prot: struct proto associated with this new sock instance
1360 */
1361 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1362 struct proto *prot)
1363 {
1364 struct sock *sk;
1365
1366 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1367 if (sk) {
1368 sk->sk_family = family;
1369 /*
1370 * See comment in struct sock definition to understand
1371 * why we need sk_prot_creator -acme
1372 */
1373 sk->sk_prot = sk->sk_prot_creator = prot;
1374 sock_lock_init(sk);
1375 sock_net_set(sk, get_net(net));
1376 atomic_set(&sk->sk_wmem_alloc, 1);
1377
1378 sock_update_classid(sk);
1379 sock_update_netprioidx(sk);
1380 }
1381
1382 return sk;
1383 }
1384 EXPORT_SYMBOL(sk_alloc);
1385
1386 static void __sk_free(struct sock *sk)
1387 {
1388 struct sk_filter *filter;
1389
1390 if (sk->sk_destruct)
1391 sk->sk_destruct(sk);
1392
1393 filter = rcu_dereference_check(sk->sk_filter,
1394 atomic_read(&sk->sk_wmem_alloc) == 0);
1395 if (filter) {
1396 sk_filter_uncharge(sk, filter);
1397 RCU_INIT_POINTER(sk->sk_filter, NULL);
1398 }
1399
1400 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1401
1402 if (atomic_read(&sk->sk_omem_alloc))
1403 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1404 __func__, atomic_read(&sk->sk_omem_alloc));
1405
1406 if (sk->sk_peer_cred)
1407 put_cred(sk->sk_peer_cred);
1408 put_pid(sk->sk_peer_pid);
1409 put_net(sock_net(sk));
1410 sk_prot_free(sk->sk_prot_creator, sk);
1411 }
1412
1413 void sk_free(struct sock *sk)
1414 {
1415 /*
1416 * We subtract one from sk_wmem_alloc and can know if
1417 * some packets are still in some tx queue.
1418 * If not null, sock_wfree() will call __sk_free(sk) later
1419 */
1420 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1421 __sk_free(sk);
1422 }
1423 EXPORT_SYMBOL(sk_free);
1424
1425 /*
1426 * Last sock_put should drop reference to sk->sk_net. It has already
1427 * been dropped in sk_change_net. Taking reference to stopping namespace
1428 * is not an option.
1429 * Take reference to a socket to remove it from hash _alive_ and after that
1430 * destroy it in the context of init_net.
1431 */
1432 void sk_release_kernel(struct sock *sk)
1433 {
1434 if (sk == NULL || sk->sk_socket == NULL)
1435 return;
1436
1437 sock_hold(sk);
1438 sock_release(sk->sk_socket);
1439 release_net(sock_net(sk));
1440 sock_net_set(sk, get_net(&init_net));
1441 sock_put(sk);
1442 }
1443 EXPORT_SYMBOL(sk_release_kernel);
1444
1445 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1446 {
1447 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1448 sock_update_memcg(newsk);
1449 }
1450
1451 /**
1452 * sk_clone_lock - clone a socket, and lock its clone
1453 * @sk: the socket to clone
1454 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1455 *
1456 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1457 */
1458 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1459 {
1460 struct sock *newsk;
1461
1462 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1463 if (newsk != NULL) {
1464 struct sk_filter *filter;
1465
1466 sock_copy(newsk, sk);
1467
1468 /* SANITY */
1469 get_net(sock_net(newsk));
1470 sk_node_init(&newsk->sk_node);
1471 sock_lock_init(newsk);
1472 bh_lock_sock(newsk);
1473 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1474 newsk->sk_backlog.len = 0;
1475
1476 atomic_set(&newsk->sk_rmem_alloc, 0);
1477 /*
1478 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1479 */
1480 atomic_set(&newsk->sk_wmem_alloc, 1);
1481 atomic_set(&newsk->sk_omem_alloc, 0);
1482 skb_queue_head_init(&newsk->sk_receive_queue);
1483 skb_queue_head_init(&newsk->sk_write_queue);
1484 #ifdef CONFIG_NET_DMA
1485 skb_queue_head_init(&newsk->sk_async_wait_queue);
1486 #endif
1487
1488 spin_lock_init(&newsk->sk_dst_lock);
1489 rwlock_init(&newsk->sk_callback_lock);
1490 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1491 af_callback_keys + newsk->sk_family,
1492 af_family_clock_key_strings[newsk->sk_family]);
1493
1494 newsk->sk_dst_cache = NULL;
1495 newsk->sk_wmem_queued = 0;
1496 newsk->sk_forward_alloc = 0;
1497 newsk->sk_send_head = NULL;
1498 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1499
1500 sock_reset_flag(newsk, SOCK_DONE);
1501 skb_queue_head_init(&newsk->sk_error_queue);
1502
1503 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1504 if (filter != NULL)
1505 sk_filter_charge(newsk, filter);
1506
1507 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1508 /* It is still raw copy of parent, so invalidate
1509 * destructor and make plain sk_free() */
1510 newsk->sk_destruct = NULL;
1511 bh_unlock_sock(newsk);
1512 sk_free(newsk);
1513 newsk = NULL;
1514 goto out;
1515 }
1516
1517 newsk->sk_err = 0;
1518 newsk->sk_err_soft = 0;
1519 newsk->sk_priority = 0;
1520 /*
1521 * Before updating sk_refcnt, we must commit prior changes to memory
1522 * (Documentation/RCU/rculist_nulls.txt for details)
1523 */
1524 smp_wmb();
1525 atomic_set(&newsk->sk_refcnt, 2);
1526
1527 /*
1528 * Increment the counter in the same struct proto as the master
1529 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1530 * is the same as sk->sk_prot->socks, as this field was copied
1531 * with memcpy).
1532 *
1533 * This _changes_ the previous behaviour, where
1534 * tcp_create_openreq_child always was incrementing the
1535 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1536 * to be taken into account in all callers. -acme
1537 */
1538 sk_refcnt_debug_inc(newsk);
1539 sk_set_socket(newsk, NULL);
1540 newsk->sk_wq = NULL;
1541
1542 sk_update_clone(sk, newsk);
1543
1544 if (newsk->sk_prot->sockets_allocated)
1545 sk_sockets_allocated_inc(newsk);
1546
1547 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1548 net_enable_timestamp();
1549 }
1550 out:
1551 return newsk;
1552 }
1553 EXPORT_SYMBOL_GPL(sk_clone_lock);
1554
1555 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1556 {
1557 __sk_dst_set(sk, dst);
1558 sk->sk_route_caps = dst->dev->features;
1559 if (sk->sk_route_caps & NETIF_F_GSO)
1560 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1561 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1562 if (sk_can_gso(sk)) {
1563 if (dst->header_len) {
1564 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1565 } else {
1566 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1567 sk->sk_gso_max_size = dst->dev->gso_max_size;
1568 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1569 }
1570 }
1571 }
1572 EXPORT_SYMBOL_GPL(sk_setup_caps);
1573
1574 /*
1575 * Simple resource managers for sockets.
1576 */
1577
1578
1579 /*
1580 * Write buffer destructor automatically called from kfree_skb.
1581 */
1582 void sock_wfree(struct sk_buff *skb)
1583 {
1584 struct sock *sk = skb->sk;
1585 unsigned int len = skb->truesize;
1586
1587 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1588 /*
1589 * Keep a reference on sk_wmem_alloc, this will be released
1590 * after sk_write_space() call
1591 */
1592 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1593 sk->sk_write_space(sk);
1594 len = 1;
1595 }
1596 /*
1597 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1598 * could not do because of in-flight packets
1599 */
1600 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1601 __sk_free(sk);
1602 }
1603 EXPORT_SYMBOL(sock_wfree);
1604
1605 /*
1606 * Read buffer destructor automatically called from kfree_skb.
1607 */
1608 void sock_rfree(struct sk_buff *skb)
1609 {
1610 struct sock *sk = skb->sk;
1611 unsigned int len = skb->truesize;
1612
1613 atomic_sub(len, &sk->sk_rmem_alloc);
1614 sk_mem_uncharge(sk, len);
1615 }
1616 EXPORT_SYMBOL(sock_rfree);
1617
1618 void sock_edemux(struct sk_buff *skb)
1619 {
1620 struct sock *sk = skb->sk;
1621
1622 #ifdef CONFIG_INET
1623 if (sk->sk_state == TCP_TIME_WAIT)
1624 inet_twsk_put(inet_twsk(sk));
1625 else
1626 #endif
1627 sock_put(sk);
1628 }
1629 EXPORT_SYMBOL(sock_edemux);
1630
1631 kuid_t sock_i_uid(struct sock *sk)
1632 {
1633 kuid_t uid;
1634
1635 read_lock_bh(&sk->sk_callback_lock);
1636 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1637 read_unlock_bh(&sk->sk_callback_lock);
1638 return uid;
1639 }
1640 EXPORT_SYMBOL(sock_i_uid);
1641
1642 unsigned long sock_i_ino(struct sock *sk)
1643 {
1644 unsigned long ino;
1645
1646 read_lock_bh(&sk->sk_callback_lock);
1647 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1648 read_unlock_bh(&sk->sk_callback_lock);
1649 return ino;
1650 }
1651 EXPORT_SYMBOL(sock_i_ino);
1652
1653 /*
1654 * Allocate a skb from the socket's send buffer.
1655 */
1656 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1657 gfp_t priority)
1658 {
1659 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1660 struct sk_buff *skb = alloc_skb(size, priority);
1661 if (skb) {
1662 skb_set_owner_w(skb, sk);
1663 return skb;
1664 }
1665 }
1666 return NULL;
1667 }
1668 EXPORT_SYMBOL(sock_wmalloc);
1669
1670 /*
1671 * Allocate a skb from the socket's receive buffer.
1672 */
1673 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1674 gfp_t priority)
1675 {
1676 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1677 struct sk_buff *skb = alloc_skb(size, priority);
1678 if (skb) {
1679 skb_set_owner_r(skb, sk);
1680 return skb;
1681 }
1682 }
1683 return NULL;
1684 }
1685
1686 /*
1687 * Allocate a memory block from the socket's option memory buffer.
1688 */
1689 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1690 {
1691 if ((unsigned int)size <= sysctl_optmem_max &&
1692 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1693 void *mem;
1694 /* First do the add, to avoid the race if kmalloc
1695 * might sleep.
1696 */
1697 atomic_add(size, &sk->sk_omem_alloc);
1698 mem = kmalloc(size, priority);
1699 if (mem)
1700 return mem;
1701 atomic_sub(size, &sk->sk_omem_alloc);
1702 }
1703 return NULL;
1704 }
1705 EXPORT_SYMBOL(sock_kmalloc);
1706
1707 /*
1708 * Free an option memory block.
1709 */
1710 void sock_kfree_s(struct sock *sk, void *mem, int size)
1711 {
1712 kfree(mem);
1713 atomic_sub(size, &sk->sk_omem_alloc);
1714 }
1715 EXPORT_SYMBOL(sock_kfree_s);
1716
1717 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1718 I think, these locks should be removed for datagram sockets.
1719 */
1720 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1721 {
1722 DEFINE_WAIT(wait);
1723
1724 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1725 for (;;) {
1726 if (!timeo)
1727 break;
1728 if (signal_pending(current))
1729 break;
1730 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1731 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1732 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1733 break;
1734 if (sk->sk_shutdown & SEND_SHUTDOWN)
1735 break;
1736 if (sk->sk_err)
1737 break;
1738 timeo = schedule_timeout(timeo);
1739 }
1740 finish_wait(sk_sleep(sk), &wait);
1741 return timeo;
1742 }
1743
1744
1745 /*
1746 * Generic send/receive buffer handlers
1747 */
1748
1749 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1750 unsigned long data_len, int noblock,
1751 int *errcode)
1752 {
1753 struct sk_buff *skb;
1754 gfp_t gfp_mask;
1755 long timeo;
1756 int err;
1757 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1758
1759 err = -EMSGSIZE;
1760 if (npages > MAX_SKB_FRAGS)
1761 goto failure;
1762
1763 gfp_mask = sk->sk_allocation;
1764 if (gfp_mask & __GFP_WAIT)
1765 gfp_mask |= __GFP_REPEAT;
1766
1767 timeo = sock_sndtimeo(sk, noblock);
1768 while (1) {
1769 err = sock_error(sk);
1770 if (err != 0)
1771 goto failure;
1772
1773 err = -EPIPE;
1774 if (sk->sk_shutdown & SEND_SHUTDOWN)
1775 goto failure;
1776
1777 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1778 skb = alloc_skb(header_len, gfp_mask);
1779 if (skb) {
1780 int i;
1781
1782 /* No pages, we're done... */
1783 if (!data_len)
1784 break;
1785
1786 skb->truesize += data_len;
1787 skb_shinfo(skb)->nr_frags = npages;
1788 for (i = 0; i < npages; i++) {
1789 struct page *page;
1790
1791 page = alloc_pages(sk->sk_allocation, 0);
1792 if (!page) {
1793 err = -ENOBUFS;
1794 skb_shinfo(skb)->nr_frags = i;
1795 kfree_skb(skb);
1796 goto failure;
1797 }
1798
1799 __skb_fill_page_desc(skb, i,
1800 page, 0,
1801 (data_len >= PAGE_SIZE ?
1802 PAGE_SIZE :
1803 data_len));
1804 data_len -= PAGE_SIZE;
1805 }
1806
1807 /* Full success... */
1808 break;
1809 }
1810 err = -ENOBUFS;
1811 goto failure;
1812 }
1813 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1814 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1815 err = -EAGAIN;
1816 if (!timeo)
1817 goto failure;
1818 if (signal_pending(current))
1819 goto interrupted;
1820 timeo = sock_wait_for_wmem(sk, timeo);
1821 }
1822
1823 skb_set_owner_w(skb, sk);
1824 return skb;
1825
1826 interrupted:
1827 err = sock_intr_errno(timeo);
1828 failure:
1829 *errcode = err;
1830 return NULL;
1831 }
1832 EXPORT_SYMBOL(sock_alloc_send_pskb);
1833
1834 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1835 int noblock, int *errcode)
1836 {
1837 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1838 }
1839 EXPORT_SYMBOL(sock_alloc_send_skb);
1840
1841 /* On 32bit arches, an skb frag is limited to 2^15 */
1842 #define SKB_FRAG_PAGE_ORDER get_order(32768)
1843
1844 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1845 {
1846 int order;
1847
1848 if (pfrag->page) {
1849 if (atomic_read(&pfrag->page->_count) == 1) {
1850 pfrag->offset = 0;
1851 return true;
1852 }
1853 if (pfrag->offset < pfrag->size)
1854 return true;
1855 put_page(pfrag->page);
1856 }
1857
1858 /* We restrict high order allocations to users that can afford to wait */
1859 order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
1860
1861 do {
1862 gfp_t gfp = sk->sk_allocation;
1863
1864 if (order)
1865 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
1866 pfrag->page = alloc_pages(gfp, order);
1867 if (likely(pfrag->page)) {
1868 pfrag->offset = 0;
1869 pfrag->size = PAGE_SIZE << order;
1870 return true;
1871 }
1872 } while (--order >= 0);
1873
1874 sk_enter_memory_pressure(sk);
1875 sk_stream_moderate_sndbuf(sk);
1876 return false;
1877 }
1878 EXPORT_SYMBOL(sk_page_frag_refill);
1879
1880 static void __lock_sock(struct sock *sk)
1881 __releases(&sk->sk_lock.slock)
1882 __acquires(&sk->sk_lock.slock)
1883 {
1884 DEFINE_WAIT(wait);
1885
1886 for (;;) {
1887 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1888 TASK_UNINTERRUPTIBLE);
1889 spin_unlock_bh(&sk->sk_lock.slock);
1890 schedule();
1891 spin_lock_bh(&sk->sk_lock.slock);
1892 if (!sock_owned_by_user(sk))
1893 break;
1894 }
1895 finish_wait(&sk->sk_lock.wq, &wait);
1896 }
1897
1898 static void __release_sock(struct sock *sk)
1899 __releases(&sk->sk_lock.slock)
1900 __acquires(&sk->sk_lock.slock)
1901 {
1902 struct sk_buff *skb = sk->sk_backlog.head;
1903
1904 do {
1905 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1906 bh_unlock_sock(sk);
1907
1908 do {
1909 struct sk_buff *next = skb->next;
1910
1911 prefetch(next);
1912 WARN_ON_ONCE(skb_dst_is_noref(skb));
1913 skb->next = NULL;
1914 sk_backlog_rcv(sk, skb);
1915
1916 /*
1917 * We are in process context here with softirqs
1918 * disabled, use cond_resched_softirq() to preempt.
1919 * This is safe to do because we've taken the backlog
1920 * queue private:
1921 */
1922 cond_resched_softirq();
1923
1924 skb = next;
1925 } while (skb != NULL);
1926
1927 bh_lock_sock(sk);
1928 } while ((skb = sk->sk_backlog.head) != NULL);
1929
1930 /*
1931 * Doing the zeroing here guarantee we can not loop forever
1932 * while a wild producer attempts to flood us.
1933 */
1934 sk->sk_backlog.len = 0;
1935 }
1936
1937 /**
1938 * sk_wait_data - wait for data to arrive at sk_receive_queue
1939 * @sk: sock to wait on
1940 * @timeo: for how long
1941 *
1942 * Now socket state including sk->sk_err is changed only under lock,
1943 * hence we may omit checks after joining wait queue.
1944 * We check receive queue before schedule() only as optimization;
1945 * it is very likely that release_sock() added new data.
1946 */
1947 int sk_wait_data(struct sock *sk, long *timeo)
1948 {
1949 int rc;
1950 DEFINE_WAIT(wait);
1951
1952 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1953 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1954 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1955 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1956 finish_wait(sk_sleep(sk), &wait);
1957 return rc;
1958 }
1959 EXPORT_SYMBOL(sk_wait_data);
1960
1961 /**
1962 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1963 * @sk: socket
1964 * @size: memory size to allocate
1965 * @kind: allocation type
1966 *
1967 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1968 * rmem allocation. This function assumes that protocols which have
1969 * memory_pressure use sk_wmem_queued as write buffer accounting.
1970 */
1971 int __sk_mem_schedule(struct sock *sk, int size, int kind)
1972 {
1973 struct proto *prot = sk->sk_prot;
1974 int amt = sk_mem_pages(size);
1975 long allocated;
1976 int parent_status = UNDER_LIMIT;
1977
1978 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1979
1980 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1981
1982 /* Under limit. */
1983 if (parent_status == UNDER_LIMIT &&
1984 allocated <= sk_prot_mem_limits(sk, 0)) {
1985 sk_leave_memory_pressure(sk);
1986 return 1;
1987 }
1988
1989 /* Under pressure. (we or our parents) */
1990 if ((parent_status > SOFT_LIMIT) ||
1991 allocated > sk_prot_mem_limits(sk, 1))
1992 sk_enter_memory_pressure(sk);
1993
1994 /* Over hard limit (we or our parents) */
1995 if ((parent_status == OVER_LIMIT) ||
1996 (allocated > sk_prot_mem_limits(sk, 2)))
1997 goto suppress_allocation;
1998
1999 /* guarantee minimum buffer size under pressure */
2000 if (kind == SK_MEM_RECV) {
2001 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2002 return 1;
2003
2004 } else { /* SK_MEM_SEND */
2005 if (sk->sk_type == SOCK_STREAM) {
2006 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2007 return 1;
2008 } else if (atomic_read(&sk->sk_wmem_alloc) <
2009 prot->sysctl_wmem[0])
2010 return 1;
2011 }
2012
2013 if (sk_has_memory_pressure(sk)) {
2014 int alloc;
2015
2016 if (!sk_under_memory_pressure(sk))
2017 return 1;
2018 alloc = sk_sockets_allocated_read_positive(sk);
2019 if (sk_prot_mem_limits(sk, 2) > alloc *
2020 sk_mem_pages(sk->sk_wmem_queued +
2021 atomic_read(&sk->sk_rmem_alloc) +
2022 sk->sk_forward_alloc))
2023 return 1;
2024 }
2025
2026 suppress_allocation:
2027
2028 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2029 sk_stream_moderate_sndbuf(sk);
2030
2031 /* Fail only if socket is _under_ its sndbuf.
2032 * In this case we cannot block, so that we have to fail.
2033 */
2034 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2035 return 1;
2036 }
2037
2038 trace_sock_exceed_buf_limit(sk, prot, allocated);
2039
2040 /* Alas. Undo changes. */
2041 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2042
2043 sk_memory_allocated_sub(sk, amt);
2044
2045 return 0;
2046 }
2047 EXPORT_SYMBOL(__sk_mem_schedule);
2048
2049 /**
2050 * __sk_reclaim - reclaim memory_allocated
2051 * @sk: socket
2052 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2053 */
2054 void __sk_mem_reclaim(struct sock *sk, int amount)
2055 {
2056 amount >>= SK_MEM_QUANTUM_SHIFT;
2057 sk_memory_allocated_sub(sk, amount);
2058 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2059
2060 if (sk_under_memory_pressure(sk) &&
2061 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2062 sk_leave_memory_pressure(sk);
2063 }
2064 EXPORT_SYMBOL(__sk_mem_reclaim);
2065
2066
2067 /*
2068 * Set of default routines for initialising struct proto_ops when
2069 * the protocol does not support a particular function. In certain
2070 * cases where it makes no sense for a protocol to have a "do nothing"
2071 * function, some default processing is provided.
2072 */
2073
2074 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2075 {
2076 return -EOPNOTSUPP;
2077 }
2078 EXPORT_SYMBOL(sock_no_bind);
2079
2080 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2081 int len, int flags)
2082 {
2083 return -EOPNOTSUPP;
2084 }
2085 EXPORT_SYMBOL(sock_no_connect);
2086
2087 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2088 {
2089 return -EOPNOTSUPP;
2090 }
2091 EXPORT_SYMBOL(sock_no_socketpair);
2092
2093 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2094 {
2095 return -EOPNOTSUPP;
2096 }
2097 EXPORT_SYMBOL(sock_no_accept);
2098
2099 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2100 int *len, int peer)
2101 {
2102 return -EOPNOTSUPP;
2103 }
2104 EXPORT_SYMBOL(sock_no_getname);
2105
2106 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2107 {
2108 return 0;
2109 }
2110 EXPORT_SYMBOL(sock_no_poll);
2111
2112 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2113 {
2114 return -EOPNOTSUPP;
2115 }
2116 EXPORT_SYMBOL(sock_no_ioctl);
2117
2118 int sock_no_listen(struct socket *sock, int backlog)
2119 {
2120 return -EOPNOTSUPP;
2121 }
2122 EXPORT_SYMBOL(sock_no_listen);
2123
2124 int sock_no_shutdown(struct socket *sock, int how)
2125 {
2126 return -EOPNOTSUPP;
2127 }
2128 EXPORT_SYMBOL(sock_no_shutdown);
2129
2130 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2131 char __user *optval, unsigned int optlen)
2132 {
2133 return -EOPNOTSUPP;
2134 }
2135 EXPORT_SYMBOL(sock_no_setsockopt);
2136
2137 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2138 char __user *optval, int __user *optlen)
2139 {
2140 return -EOPNOTSUPP;
2141 }
2142 EXPORT_SYMBOL(sock_no_getsockopt);
2143
2144 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2145 size_t len)
2146 {
2147 return -EOPNOTSUPP;
2148 }
2149 EXPORT_SYMBOL(sock_no_sendmsg);
2150
2151 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2152 size_t len, int flags)
2153 {
2154 return -EOPNOTSUPP;
2155 }
2156 EXPORT_SYMBOL(sock_no_recvmsg);
2157
2158 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2159 {
2160 /* Mirror missing mmap method error code */
2161 return -ENODEV;
2162 }
2163 EXPORT_SYMBOL(sock_no_mmap);
2164
2165 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2166 {
2167 ssize_t res;
2168 struct msghdr msg = {.msg_flags = flags};
2169 struct kvec iov;
2170 char *kaddr = kmap(page);
2171 iov.iov_base = kaddr + offset;
2172 iov.iov_len = size;
2173 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2174 kunmap(page);
2175 return res;
2176 }
2177 EXPORT_SYMBOL(sock_no_sendpage);
2178
2179 /*
2180 * Default Socket Callbacks
2181 */
2182
2183 static void sock_def_wakeup(struct sock *sk)
2184 {
2185 struct socket_wq *wq;
2186
2187 rcu_read_lock();
2188 wq = rcu_dereference(sk->sk_wq);
2189 if (wq_has_sleeper(wq))
2190 wake_up_interruptible_all(&wq->wait);
2191 rcu_read_unlock();
2192 }
2193
2194 static void sock_def_error_report(struct sock *sk)
2195 {
2196 struct socket_wq *wq;
2197
2198 rcu_read_lock();
2199 wq = rcu_dereference(sk->sk_wq);
2200 if (wq_has_sleeper(wq))
2201 wake_up_interruptible_poll(&wq->wait, POLLERR);
2202 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2203 rcu_read_unlock();
2204 }
2205
2206 static void sock_def_readable(struct sock *sk, int len)
2207 {
2208 struct socket_wq *wq;
2209
2210 rcu_read_lock();
2211 wq = rcu_dereference(sk->sk_wq);
2212 if (wq_has_sleeper(wq))
2213 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2214 POLLRDNORM | POLLRDBAND);
2215 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2216 rcu_read_unlock();
2217 }
2218
2219 static void sock_def_write_space(struct sock *sk)
2220 {
2221 struct socket_wq *wq;
2222
2223 rcu_read_lock();
2224
2225 /* Do not wake up a writer until he can make "significant"
2226 * progress. --DaveM
2227 */
2228 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2229 wq = rcu_dereference(sk->sk_wq);
2230 if (wq_has_sleeper(wq))
2231 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2232 POLLWRNORM | POLLWRBAND);
2233
2234 /* Should agree with poll, otherwise some programs break */
2235 if (sock_writeable(sk))
2236 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2237 }
2238
2239 rcu_read_unlock();
2240 }
2241
2242 static void sock_def_destruct(struct sock *sk)
2243 {
2244 kfree(sk->sk_protinfo);
2245 }
2246
2247 void sk_send_sigurg(struct sock *sk)
2248 {
2249 if (sk->sk_socket && sk->sk_socket->file)
2250 if (send_sigurg(&sk->sk_socket->file->f_owner))
2251 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2252 }
2253 EXPORT_SYMBOL(sk_send_sigurg);
2254
2255 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2256 unsigned long expires)
2257 {
2258 if (!mod_timer(timer, expires))
2259 sock_hold(sk);
2260 }
2261 EXPORT_SYMBOL(sk_reset_timer);
2262
2263 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2264 {
2265 if (del_timer(timer))
2266 __sock_put(sk);
2267 }
2268 EXPORT_SYMBOL(sk_stop_timer);
2269
2270 void sock_init_data(struct socket *sock, struct sock *sk)
2271 {
2272 skb_queue_head_init(&sk->sk_receive_queue);
2273 skb_queue_head_init(&sk->sk_write_queue);
2274 skb_queue_head_init(&sk->sk_error_queue);
2275 #ifdef CONFIG_NET_DMA
2276 skb_queue_head_init(&sk->sk_async_wait_queue);
2277 #endif
2278
2279 sk->sk_send_head = NULL;
2280
2281 init_timer(&sk->sk_timer);
2282
2283 sk->sk_allocation = GFP_KERNEL;
2284 sk->sk_rcvbuf = sysctl_rmem_default;
2285 sk->sk_sndbuf = sysctl_wmem_default;
2286 sk->sk_state = TCP_CLOSE;
2287 sk_set_socket(sk, sock);
2288
2289 sock_set_flag(sk, SOCK_ZAPPED);
2290
2291 if (sock) {
2292 sk->sk_type = sock->type;
2293 sk->sk_wq = sock->wq;
2294 sock->sk = sk;
2295 } else
2296 sk->sk_wq = NULL;
2297
2298 spin_lock_init(&sk->sk_dst_lock);
2299 rwlock_init(&sk->sk_callback_lock);
2300 lockdep_set_class_and_name(&sk->sk_callback_lock,
2301 af_callback_keys + sk->sk_family,
2302 af_family_clock_key_strings[sk->sk_family]);
2303
2304 sk->sk_state_change = sock_def_wakeup;
2305 sk->sk_data_ready = sock_def_readable;
2306 sk->sk_write_space = sock_def_write_space;
2307 sk->sk_error_report = sock_def_error_report;
2308 sk->sk_destruct = sock_def_destruct;
2309
2310 sk->sk_frag.page = NULL;
2311 sk->sk_frag.offset = 0;
2312 sk->sk_peek_off = -1;
2313
2314 sk->sk_peer_pid = NULL;
2315 sk->sk_peer_cred = NULL;
2316 sk->sk_write_pending = 0;
2317 sk->sk_rcvlowat = 1;
2318 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2319 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2320
2321 sk->sk_stamp = ktime_set(-1L, 0);
2322
2323 sk->sk_pacing_rate = ~0U;
2324 /*
2325 * Before updating sk_refcnt, we must commit prior changes to memory
2326 * (Documentation/RCU/rculist_nulls.txt for details)
2327 */
2328 smp_wmb();
2329 atomic_set(&sk->sk_refcnt, 1);
2330 atomic_set(&sk->sk_drops, 0);
2331 }
2332 EXPORT_SYMBOL(sock_init_data);
2333
2334 void lock_sock_nested(struct sock *sk, int subclass)
2335 {
2336 might_sleep();
2337 spin_lock_bh(&sk->sk_lock.slock);
2338 if (sk->sk_lock.owned)
2339 __lock_sock(sk);
2340 sk->sk_lock.owned = 1;
2341 spin_unlock(&sk->sk_lock.slock);
2342 /*
2343 * The sk_lock has mutex_lock() semantics here:
2344 */
2345 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2346 local_bh_enable();
2347 }
2348 EXPORT_SYMBOL(lock_sock_nested);
2349
2350 void release_sock(struct sock *sk)
2351 {
2352 /*
2353 * The sk_lock has mutex_unlock() semantics:
2354 */
2355 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2356
2357 spin_lock_bh(&sk->sk_lock.slock);
2358 if (sk->sk_backlog.tail)
2359 __release_sock(sk);
2360
2361 /* Warning : release_cb() might need to release sk ownership,
2362 * ie call sock_release_ownership(sk) before us.
2363 */
2364 if (sk->sk_prot->release_cb)
2365 sk->sk_prot->release_cb(sk);
2366
2367 sock_release_ownership(sk);
2368 if (waitqueue_active(&sk->sk_lock.wq))
2369 wake_up(&sk->sk_lock.wq);
2370 spin_unlock_bh(&sk->sk_lock.slock);
2371 }
2372 EXPORT_SYMBOL(release_sock);
2373
2374 /**
2375 * lock_sock_fast - fast version of lock_sock
2376 * @sk: socket
2377 *
2378 * This version should be used for very small section, where process wont block
2379 * return false if fast path is taken
2380 * sk_lock.slock locked, owned = 0, BH disabled
2381 * return true if slow path is taken
2382 * sk_lock.slock unlocked, owned = 1, BH enabled
2383 */
2384 bool lock_sock_fast(struct sock *sk)
2385 {
2386 might_sleep();
2387 spin_lock_bh(&sk->sk_lock.slock);
2388
2389 if (!sk->sk_lock.owned)
2390 /*
2391 * Note : We must disable BH
2392 */
2393 return false;
2394
2395 __lock_sock(sk);
2396 sk->sk_lock.owned = 1;
2397 spin_unlock(&sk->sk_lock.slock);
2398 /*
2399 * The sk_lock has mutex_lock() semantics here:
2400 */
2401 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2402 local_bh_enable();
2403 return true;
2404 }
2405 EXPORT_SYMBOL(lock_sock_fast);
2406
2407 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2408 {
2409 struct timeval tv;
2410 if (!sock_flag(sk, SOCK_TIMESTAMP))
2411 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2412 tv = ktime_to_timeval(sk->sk_stamp);
2413 if (tv.tv_sec == -1)
2414 return -ENOENT;
2415 if (tv.tv_sec == 0) {
2416 sk->sk_stamp = ktime_get_real();
2417 tv = ktime_to_timeval(sk->sk_stamp);
2418 }
2419 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2420 }
2421 EXPORT_SYMBOL(sock_get_timestamp);
2422
2423 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2424 {
2425 struct timespec ts;
2426 if (!sock_flag(sk, SOCK_TIMESTAMP))
2427 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2428 ts = ktime_to_timespec(sk->sk_stamp);
2429 if (ts.tv_sec == -1)
2430 return -ENOENT;
2431 if (ts.tv_sec == 0) {
2432 sk->sk_stamp = ktime_get_real();
2433 ts = ktime_to_timespec(sk->sk_stamp);
2434 }
2435 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2436 }
2437 EXPORT_SYMBOL(sock_get_timestampns);
2438
2439 void sock_enable_timestamp(struct sock *sk, int flag)
2440 {
2441 if (!sock_flag(sk, flag)) {
2442 unsigned long previous_flags = sk->sk_flags;
2443
2444 sock_set_flag(sk, flag);
2445 /*
2446 * we just set one of the two flags which require net
2447 * time stamping, but time stamping might have been on
2448 * already because of the other one
2449 */
2450 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2451 net_enable_timestamp();
2452 }
2453 }
2454
2455 /*
2456 * Get a socket option on an socket.
2457 *
2458 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2459 * asynchronous errors should be reported by getsockopt. We assume
2460 * this means if you specify SO_ERROR (otherwise whats the point of it).
2461 */
2462 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2463 char __user *optval, int __user *optlen)
2464 {
2465 struct sock *sk = sock->sk;
2466
2467 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2468 }
2469 EXPORT_SYMBOL(sock_common_getsockopt);
2470
2471 #ifdef CONFIG_COMPAT
2472 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2473 char __user *optval, int __user *optlen)
2474 {
2475 struct sock *sk = sock->sk;
2476
2477 if (sk->sk_prot->compat_getsockopt != NULL)
2478 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2479 optval, optlen);
2480 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2481 }
2482 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2483 #endif
2484
2485 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2486 struct msghdr *msg, size_t size, int flags)
2487 {
2488 struct sock *sk = sock->sk;
2489 int addr_len = 0;
2490 int err;
2491
2492 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2493 flags & ~MSG_DONTWAIT, &addr_len);
2494 if (err >= 0)
2495 msg->msg_namelen = addr_len;
2496 return err;
2497 }
2498 EXPORT_SYMBOL(sock_common_recvmsg);
2499
2500 /*
2501 * Set socket options on an inet socket.
2502 */
2503 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2504 char __user *optval, unsigned int optlen)
2505 {
2506 struct sock *sk = sock->sk;
2507
2508 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2509 }
2510 EXPORT_SYMBOL(sock_common_setsockopt);
2511
2512 #ifdef CONFIG_COMPAT
2513 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2514 char __user *optval, unsigned int optlen)
2515 {
2516 struct sock *sk = sock->sk;
2517
2518 if (sk->sk_prot->compat_setsockopt != NULL)
2519 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2520 optval, optlen);
2521 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2522 }
2523 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2524 #endif
2525
2526 void sk_common_release(struct sock *sk)
2527 {
2528 if (sk->sk_prot->destroy)
2529 sk->sk_prot->destroy(sk);
2530
2531 /*
2532 * Observation: when sock_common_release is called, processes have
2533 * no access to socket. But net still has.
2534 * Step one, detach it from networking:
2535 *
2536 * A. Remove from hash tables.
2537 */
2538
2539 sk->sk_prot->unhash(sk);
2540
2541 /*
2542 * In this point socket cannot receive new packets, but it is possible
2543 * that some packets are in flight because some CPU runs receiver and
2544 * did hash table lookup before we unhashed socket. They will achieve
2545 * receive queue and will be purged by socket destructor.
2546 *
2547 * Also we still have packets pending on receive queue and probably,
2548 * our own packets waiting in device queues. sock_destroy will drain
2549 * receive queue, but transmitted packets will delay socket destruction
2550 * until the last reference will be released.
2551 */
2552
2553 sock_orphan(sk);
2554
2555 xfrm_sk_free_policy(sk);
2556
2557 sk_refcnt_debug_release(sk);
2558
2559 if (sk->sk_frag.page) {
2560 put_page(sk->sk_frag.page);
2561 sk->sk_frag.page = NULL;
2562 }
2563
2564 sock_put(sk);
2565 }
2566 EXPORT_SYMBOL(sk_common_release);
2567
2568 #ifdef CONFIG_PROC_FS
2569 #define PROTO_INUSE_NR 64 /* should be enough for the first time */
2570 struct prot_inuse {
2571 int val[PROTO_INUSE_NR];
2572 };
2573
2574 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2575
2576 #ifdef CONFIG_NET_NS
2577 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2578 {
2579 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2580 }
2581 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2582
2583 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2584 {
2585 int cpu, idx = prot->inuse_idx;
2586 int res = 0;
2587
2588 for_each_possible_cpu(cpu)
2589 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2590
2591 return res >= 0 ? res : 0;
2592 }
2593 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2594
2595 static int __net_init sock_inuse_init_net(struct net *net)
2596 {
2597 net->core.inuse = alloc_percpu(struct prot_inuse);
2598 return net->core.inuse ? 0 : -ENOMEM;
2599 }
2600
2601 static void __net_exit sock_inuse_exit_net(struct net *net)
2602 {
2603 free_percpu(net->core.inuse);
2604 }
2605
2606 static struct pernet_operations net_inuse_ops = {
2607 .init = sock_inuse_init_net,
2608 .exit = sock_inuse_exit_net,
2609 };
2610
2611 static __init int net_inuse_init(void)
2612 {
2613 if (register_pernet_subsys(&net_inuse_ops))
2614 panic("Cannot initialize net inuse counters");
2615
2616 return 0;
2617 }
2618
2619 core_initcall(net_inuse_init);
2620 #else
2621 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2622
2623 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2624 {
2625 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2626 }
2627 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2628
2629 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2630 {
2631 int cpu, idx = prot->inuse_idx;
2632 int res = 0;
2633
2634 for_each_possible_cpu(cpu)
2635 res += per_cpu(prot_inuse, cpu).val[idx];
2636
2637 return res >= 0 ? res : 0;
2638 }
2639 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2640 #endif
2641
2642 static void assign_proto_idx(struct proto *prot)
2643 {
2644 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2645
2646 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2647 pr_err("PROTO_INUSE_NR exhausted\n");
2648 return;
2649 }
2650
2651 set_bit(prot->inuse_idx, proto_inuse_idx);
2652 }
2653
2654 static void release_proto_idx(struct proto *prot)
2655 {
2656 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2657 clear_bit(prot->inuse_idx, proto_inuse_idx);
2658 }
2659 #else
2660 static inline void assign_proto_idx(struct proto *prot)
2661 {
2662 }
2663
2664 static inline void release_proto_idx(struct proto *prot)
2665 {
2666 }
2667 #endif
2668
2669 int proto_register(struct proto *prot, int alloc_slab)
2670 {
2671 if (alloc_slab) {
2672 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2673 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2674 NULL);
2675
2676 if (prot->slab == NULL) {
2677 pr_crit("%s: Can't create sock SLAB cache!\n",
2678 prot->name);
2679 goto out;
2680 }
2681
2682 if (prot->rsk_prot != NULL) {
2683 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2684 if (prot->rsk_prot->slab_name == NULL)
2685 goto out_free_sock_slab;
2686
2687 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2688 prot->rsk_prot->obj_size, 0,
2689 SLAB_HWCACHE_ALIGN, NULL);
2690
2691 if (prot->rsk_prot->slab == NULL) {
2692 pr_crit("%s: Can't create request sock SLAB cache!\n",
2693 prot->name);
2694 goto out_free_request_sock_slab_name;
2695 }
2696 }
2697
2698 if (prot->twsk_prot != NULL) {
2699 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2700
2701 if (prot->twsk_prot->twsk_slab_name == NULL)
2702 goto out_free_request_sock_slab;
2703
2704 prot->twsk_prot->twsk_slab =
2705 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2706 prot->twsk_prot->twsk_obj_size,
2707 0,
2708 SLAB_HWCACHE_ALIGN |
2709 prot->slab_flags,
2710 NULL);
2711 if (prot->twsk_prot->twsk_slab == NULL)
2712 goto out_free_timewait_sock_slab_name;
2713 }
2714 }
2715
2716 mutex_lock(&proto_list_mutex);
2717 list_add(&prot->node, &proto_list);
2718 assign_proto_idx(prot);
2719 mutex_unlock(&proto_list_mutex);
2720 return 0;
2721
2722 out_free_timewait_sock_slab_name:
2723 kfree(prot->twsk_prot->twsk_slab_name);
2724 out_free_request_sock_slab:
2725 if (prot->rsk_prot && prot->rsk_prot->slab) {
2726 kmem_cache_destroy(prot->rsk_prot->slab);
2727 prot->rsk_prot->slab = NULL;
2728 }
2729 out_free_request_sock_slab_name:
2730 if (prot->rsk_prot)
2731 kfree(prot->rsk_prot->slab_name);
2732 out_free_sock_slab:
2733 kmem_cache_destroy(prot->slab);
2734 prot->slab = NULL;
2735 out:
2736 return -ENOBUFS;
2737 }
2738 EXPORT_SYMBOL(proto_register);
2739
2740 void proto_unregister(struct proto *prot)
2741 {
2742 mutex_lock(&proto_list_mutex);
2743 release_proto_idx(prot);
2744 list_del(&prot->node);
2745 mutex_unlock(&proto_list_mutex);
2746
2747 if (prot->slab != NULL) {
2748 kmem_cache_destroy(prot->slab);
2749 prot->slab = NULL;
2750 }
2751
2752 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2753 kmem_cache_destroy(prot->rsk_prot->slab);
2754 kfree(prot->rsk_prot->slab_name);
2755 prot->rsk_prot->slab = NULL;
2756 }
2757
2758 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2759 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2760 kfree(prot->twsk_prot->twsk_slab_name);
2761 prot->twsk_prot->twsk_slab = NULL;
2762 }
2763 }
2764 EXPORT_SYMBOL(proto_unregister);
2765
2766 #ifdef CONFIG_PROC_FS
2767 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2768 __acquires(proto_list_mutex)
2769 {
2770 mutex_lock(&proto_list_mutex);
2771 return seq_list_start_head(&proto_list, *pos);
2772 }
2773
2774 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2775 {
2776 return seq_list_next(v, &proto_list, pos);
2777 }
2778
2779 static void proto_seq_stop(struct seq_file *seq, void *v)
2780 __releases(proto_list_mutex)
2781 {
2782 mutex_unlock(&proto_list_mutex);
2783 }
2784
2785 static char proto_method_implemented(const void *method)
2786 {
2787 return method == NULL ? 'n' : 'y';
2788 }
2789 static long sock_prot_memory_allocated(struct proto *proto)
2790 {
2791 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2792 }
2793
2794 static char *sock_prot_memory_pressure(struct proto *proto)
2795 {
2796 return proto->memory_pressure != NULL ?
2797 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2798 }
2799
2800 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2801 {
2802
2803 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2804 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2805 proto->name,
2806 proto->obj_size,
2807 sock_prot_inuse_get(seq_file_net(seq), proto),
2808 sock_prot_memory_allocated(proto),
2809 sock_prot_memory_pressure(proto),
2810 proto->max_header,
2811 proto->slab == NULL ? "no" : "yes",
2812 module_name(proto->owner),
2813 proto_method_implemented(proto->close),
2814 proto_method_implemented(proto->connect),
2815 proto_method_implemented(proto->disconnect),
2816 proto_method_implemented(proto->accept),
2817 proto_method_implemented(proto->ioctl),
2818 proto_method_implemented(proto->init),
2819 proto_method_implemented(proto->destroy),
2820 proto_method_implemented(proto->shutdown),
2821 proto_method_implemented(proto->setsockopt),
2822 proto_method_implemented(proto->getsockopt),
2823 proto_method_implemented(proto->sendmsg),
2824 proto_method_implemented(proto->recvmsg),
2825 proto_method_implemented(proto->sendpage),
2826 proto_method_implemented(proto->bind),
2827 proto_method_implemented(proto->backlog_rcv),
2828 proto_method_implemented(proto->hash),
2829 proto_method_implemented(proto->unhash),
2830 proto_method_implemented(proto->get_port),
2831 proto_method_implemented(proto->enter_memory_pressure));
2832 }
2833
2834 static int proto_seq_show(struct seq_file *seq, void *v)
2835 {
2836 if (v == &proto_list)
2837 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2838 "protocol",
2839 "size",
2840 "sockets",
2841 "memory",
2842 "press",
2843 "maxhdr",
2844 "slab",
2845 "module",
2846 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2847 else
2848 proto_seq_printf(seq, list_entry(v, struct proto, node));
2849 return 0;
2850 }
2851
2852 static const struct seq_operations proto_seq_ops = {
2853 .start = proto_seq_start,
2854 .next = proto_seq_next,
2855 .stop = proto_seq_stop,
2856 .show = proto_seq_show,
2857 };
2858
2859 static int proto_seq_open(struct inode *inode, struct file *file)
2860 {
2861 return seq_open_net(inode, file, &proto_seq_ops,
2862 sizeof(struct seq_net_private));
2863 }
2864
2865 static const struct file_operations proto_seq_fops = {
2866 .owner = THIS_MODULE,
2867 .open = proto_seq_open,
2868 .read = seq_read,
2869 .llseek = seq_lseek,
2870 .release = seq_release_net,
2871 };
2872
2873 static __net_init int proto_init_net(struct net *net)
2874 {
2875 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2876 return -ENOMEM;
2877
2878 return 0;
2879 }
2880
2881 static __net_exit void proto_exit_net(struct net *net)
2882 {
2883 remove_proc_entry("protocols", net->proc_net);
2884 }
2885
2886
2887 static __net_initdata struct pernet_operations proto_net_ops = {
2888 .init = proto_init_net,
2889 .exit = proto_exit_net,
2890 };
2891
2892 static int __init proto_init(void)
2893 {
2894 return register_pernet_subsys(&proto_net_ops);
2895 }
2896
2897 subsys_initcall(proto_init);
2898
2899 #endif /* PROC_FS */