net: annotate struct sock bitfield
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / core / sock.c
1da177e4
LT
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
8 *
9 *
02c30a84 10 * Authors: Ross Biro
1da177e4
LT
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
14 *
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
4ec93edb 35 * code. The ACK stuff can wait and needs major
1da177e4
LT
36 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
82 *
83 * To Fix:
84 *
85 *
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
90 */
91
4fc268d2 92#include <linux/capability.h>
1da177e4
LT
93#include <linux/errno.h>
94#include <linux/types.h>
95#include <linux/socket.h>
96#include <linux/in.h>
97#include <linux/kernel.h>
1da177e4
LT
98#include <linux/module.h>
99#include <linux/proc_fs.h>
100#include <linux/seq_file.h>
101#include <linux/sched.h>
102#include <linux/timer.h>
103#include <linux/string.h>
104#include <linux/sockios.h>
105#include <linux/net.h>
106#include <linux/mm.h>
107#include <linux/slab.h>
108#include <linux/interrupt.h>
109#include <linux/poll.h>
110#include <linux/tcp.h>
111#include <linux/init.h>
a1f8e7f7 112#include <linux/highmem.h>
1da177e4
LT
113
114#include <asm/uaccess.h>
115#include <asm/system.h>
116
117#include <linux/netdevice.h>
118#include <net/protocol.h>
119#include <linux/skbuff.h>
457c4cbc 120#include <net/net_namespace.h>
2e6599cb 121#include <net/request_sock.h>
1da177e4 122#include <net/sock.h>
20d49473 123#include <linux/net_tstamp.h>
1da177e4
LT
124#include <net/xfrm.h>
125#include <linux/ipsec.h>
126
127#include <linux/filter.h>
128
129#ifdef CONFIG_INET
130#include <net/tcp.h>
131#endif
132
da21f24d
IM
133/*
134 * Each address family might have different locking rules, so we have
135 * one slock key per address family:
136 */
a5b5bb9a
IM
137static struct lock_class_key af_family_keys[AF_MAX];
138static struct lock_class_key af_family_slock_keys[AF_MAX];
139
a5b5bb9a
IM
140/*
141 * Make lock validator output more readable. (we pre-construct these
142 * strings build-time, so that runtime initialization of socket
143 * locks is fast):
144 */
145static const char *af_family_key_strings[AF_MAX+1] = {
146 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
147 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
148 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
149 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
150 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
151 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
152 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
cbd151bf 153 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
a5b5bb9a 154 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
cd05acfe 155 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
17926a79 156 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
bce7b154
RDC
157 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
158 "sk_lock-AF_MAX"
a5b5bb9a
IM
159};
160static const char *af_family_slock_key_strings[AF_MAX+1] = {
161 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
162 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
163 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
164 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
165 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
166 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
167 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
cbd151bf 168 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
a5b5bb9a 169 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
cd05acfe 170 "slock-27" , "slock-28" , "slock-AF_CAN" ,
17926a79 171 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
bce7b154
RDC
172 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
173 "slock-AF_MAX"
a5b5bb9a 174};
443aef0e
PZ
175static const char *af_family_clock_key_strings[AF_MAX+1] = {
176 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
177 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
178 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
179 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
180 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
181 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
182 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
cbd151bf 183 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
443aef0e 184 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
b4942af6 185 "clock-27" , "clock-28" , "clock-AF_CAN" ,
e51f802b 186 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
bce7b154
RDC
187 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
188 "clock-AF_MAX"
443aef0e 189};
da21f24d
IM
190
191/*
192 * sk_callback_lock locking rules are per-address-family,
193 * so split the lock classes by using a per-AF key:
194 */
195static struct lock_class_key af_callback_keys[AF_MAX];
196
1da177e4
LT
197/* Take into consideration the size of the struct sk_buff overhead in the
198 * determination of these values, since that is non-constant across
199 * platforms. This makes socket queueing behavior and performance
200 * not depend upon such differences.
201 */
202#define _SK_MEM_PACKETS 256
203#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256)
204#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
205#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
206
207/* Run time adjustable parameters. */
ab32ea5d
BH
208__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
209__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
210__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
211__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
1da177e4
LT
212
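To put the defaults above in perspective, here is a worked example. The sk_buff size used is an assumption (it varies with architecture and config options), not a value taken from this file.

/* Hypothetical sizing, assuming sizeof(struct sk_buff) == 240 bytes:
 *
 *   _SK_MEM_OVERHEAD = 240 + 256  = 496 bytes accounted per packet
 *   SK_WMEM_MAX      = 496 * 256  = 126976 bytes (~124 KiB)
 *   SK_RMEM_MAX      = 496 * 256  = 126976 bytes (~124 KiB)
 *
 * The same figure seeds sysctl_{w,r}mem_default and sysctl_{w,r}mem_max,
 * which sk_init() later adjusts according to how much memory the machine has.
 */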
213 /* Maximal space eaten by iovec or ancillary data plus some space */
ab32ea5d 214int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
1da177e4
LT
215
216static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
217{
218 struct timeval tv;
219
220 if (optlen < sizeof(tv))
221 return -EINVAL;
222 if (copy_from_user(&tv, optval, sizeof(tv)))
223 return -EFAULT;
ba78073e
VA
224 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
225 return -EDOM;
1da177e4 226
ba78073e 227 if (tv.tv_sec < 0) {
6f11df83
AM
228 static int warned __read_mostly;
229
ba78073e 230 *timeo_p = 0;
50aab54f 231 if (warned < 10 && net_ratelimit()) {
ba78073e
VA
232 warned++;
233 printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
234 "tries to set negative timeout\n",
ba25f9dc 235 current->comm, task_pid_nr(current));
50aab54f 236 }
ba78073e
VA
237 return 0;
238 }
1da177e4
LT
239 *timeo_p = MAX_SCHEDULE_TIMEOUT;
240 if (tv.tv_sec == 0 && tv.tv_usec == 0)
241 return 0;
242 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
243 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
244 return 0;
245}
246
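sock_set_timeout() above is the kernel side of SO_RCVTIMEO/SO_SNDTIMEO. A minimal user-space sketch of the call it services (not part of this file; error handling omitted):

#include <sys/socket.h>
#include <sys/time.h>

/* Hypothetical helper: give a socket a 2.5 second receive timeout.
 * sock_set_timeout() rejects tv_usec outside [0, 1000000) with -EDOM and
 * warns (then clamps the timeout to 0) if tv_sec is negative.
 */
static int set_recv_timeout(int fd)
{
        struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };

        return setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}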
247static void sock_warn_obsolete_bsdism(const char *name)
248{
249 static int warned;
250 static char warncomm[TASK_COMM_LEN];
4ec93edb
YH
251 if (strcmp(warncomm, current->comm) && warned < 5) {
252 strcpy(warncomm, current->comm);
1da177e4
LT
253 printk(KERN_WARNING "process `%s' is using obsolete "
254 "%s SO_BSDCOMPAT\n", warncomm, name);
255 warned++;
256 }
257}
258
20d49473 259static void sock_disable_timestamp(struct sock *sk, int flag)
4ec93edb 260{
20d49473
PO
261 if (sock_flag(sk, flag)) {
262 sock_reset_flag(sk, flag);
263 if (!sock_flag(sk, SOCK_TIMESTAMP) &&
264 !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
265 net_disable_timestamp();
266 }
1da177e4
LT
267 }
268}
269
270
f0088a50
DV
271int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
272{
273 int err = 0;
274 int skb_len;
275
9ee6b7f1 276 /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
f0088a50
DV
277 the number of warnings when compiling with -W --ANK
278 */
279 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
280 (unsigned)sk->sk_rcvbuf) {
281 err = -ENOMEM;
282 goto out;
283 }
284
fda9ef5d 285 err = sk_filter(sk, skb);
f0088a50
DV
286 if (err)
287 goto out;
288
3ab224be
HA
289 if (!sk_rmem_schedule(sk, skb->truesize)) {
290 err = -ENOBUFS;
291 goto out;
292 }
293
f0088a50
DV
294 skb->dev = NULL;
295 skb_set_owner_r(skb, sk);
49ad9599 296
f0088a50
DV
297 /* Cache the SKB length before we tack it onto the receive
298 * queue. Once it is added it no longer belongs to us and
299 * may be freed by other threads of control pulling packets
300 * from the queue.
301 */
302 skb_len = skb->len;
303
304 skb_queue_tail(&sk->sk_receive_queue, skb);
305
306 if (!sock_flag(sk, SOCK_DEAD))
307 sk->sk_data_ready(sk, skb_len);
308out:
309 return err;
310}
311EXPORT_SYMBOL(sock_queue_rcv_skb);
312
58a5a7b9 313int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
f0088a50
DV
314{
315 int rc = NET_RX_SUCCESS;
316
fda9ef5d 317 if (sk_filter(sk, skb))
f0088a50
DV
318 goto discard_and_relse;
319
320 skb->dev = NULL;
321
58a5a7b9
ACM
322 if (nested)
323 bh_lock_sock_nested(sk);
324 else
325 bh_lock_sock(sk);
a5b5bb9a
IM
326 if (!sock_owned_by_user(sk)) {
327 /*
328 * trylock + unlock semantics:
329 */
330 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
331
c57943a1 332 rc = sk_backlog_rcv(sk, skb);
a5b5bb9a
IM
333
334 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
335 } else
f0088a50
DV
336 sk_add_backlog(sk, skb);
337 bh_unlock_sock(sk);
338out:
339 sock_put(sk);
340 return rc;
341discard_and_relse:
342 kfree_skb(skb);
343 goto out;
344}
345EXPORT_SYMBOL(sk_receive_skb);
346
347struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
348{
349 struct dst_entry *dst = sk->sk_dst_cache;
350
351 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
352 sk->sk_dst_cache = NULL;
353 dst_release(dst);
354 return NULL;
355 }
356
357 return dst;
358}
359EXPORT_SYMBOL(__sk_dst_check);
360
361struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
362{
363 struct dst_entry *dst = sk_dst_get(sk);
364
365 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
366 sk_dst_reset(sk);
367 dst_release(dst);
368 return NULL;
369 }
370
371 return dst;
372}
373EXPORT_SYMBOL(sk_dst_check);
374
4878809f
DM
375static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
376{
377 int ret = -ENOPROTOOPT;
378#ifdef CONFIG_NETDEVICES
3b1e0a65 379 struct net *net = sock_net(sk);
4878809f
DM
380 char devname[IFNAMSIZ];
381 int index;
382
383 /* Sorry... */
384 ret = -EPERM;
385 if (!capable(CAP_NET_RAW))
386 goto out;
387
388 ret = -EINVAL;
389 if (optlen < 0)
390 goto out;
391
392 /* Bind this socket to a particular device like "eth0",
393 * as specified in the passed interface name. If the
394 * name is "" or the option length is zero the socket
395 * is not bound.
396 */
397 if (optlen > IFNAMSIZ - 1)
398 optlen = IFNAMSIZ - 1;
399 memset(devname, 0, sizeof(devname));
400
401 ret = -EFAULT;
402 if (copy_from_user(devname, optval, optlen))
403 goto out;
404
405 if (devname[0] == '\0') {
406 index = 0;
407 } else {
881d966b 408 struct net_device *dev = dev_get_by_name(net, devname);
4878809f
DM
409
410 ret = -ENODEV;
411 if (!dev)
412 goto out;
413
414 index = dev->ifindex;
415 dev_put(dev);
416 }
417
418 lock_sock(sk);
419 sk->sk_bound_dev_if = index;
420 sk_dst_reset(sk);
421 release_sock(sk);
422
423 ret = 0;
424
425out:
426#endif
427
428 return ret;
429}
430
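For reference, the user-space counterpart of sock_bindtodevice() is the SO_BINDTODEVICE option. A small sketch; the interface name is only an example, and, as enforced above, the caller needs CAP_NET_RAW:

#include <string.h>
#include <sys/socket.h>

/* Hypothetical: restrict a socket to eth0.  Passing an empty name (or a
 * zero option length) removes the binding again, as handled above.
 */
static int bind_to_eth0(int fd)
{
        static const char ifname[] = "eth0";

        return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                          ifname, strlen(ifname));
}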
c0ef877b
PE
431static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
432{
433 if (valbool)
434 sock_set_flag(sk, bit);
435 else
436 sock_reset_flag(sk, bit);
437}
438
1da177e4
LT
439/*
440 * This is meant for all protocols to use and covers goings on
441 * at the socket level. Everything here is generic.
442 */
443
444int sock_setsockopt(struct socket *sock, int level, int optname,
445 char __user *optval, int optlen)
446{
447 struct sock *sk=sock->sk;
1da177e4
LT
448 int val;
449 int valbool;
450 struct linger ling;
451 int ret = 0;
4ec93edb 452
1da177e4
LT
453 /*
454 * Options without arguments
455 */
456
4878809f
DM
457 if (optname == SO_BINDTODEVICE)
458 return sock_bindtodevice(sk, optval, optlen);
459
e71a4783
SH
460 if (optlen < sizeof(int))
461 return -EINVAL;
4ec93edb 462
1da177e4
LT
463 if (get_user(val, (int __user *)optval))
464 return -EFAULT;
4ec93edb
YH
465
466 valbool = val?1:0;
1da177e4
LT
467
468 lock_sock(sk);
469
e71a4783
SH
470 switch(optname) {
471 case SO_DEBUG:
472 if (val && !capable(CAP_NET_ADMIN)) {
473 ret = -EACCES;
c0ef877b
PE
474 } else
475 sock_valbool_flag(sk, SOCK_DBG, valbool);
e71a4783
SH
476 break;
477 case SO_REUSEADDR:
478 sk->sk_reuse = valbool;
479 break;
480 case SO_TYPE:
481 case SO_ERROR:
482 ret = -ENOPROTOOPT;
483 break;
484 case SO_DONTROUTE:
c0ef877b 485 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
e71a4783
SH
486 break;
487 case SO_BROADCAST:
488 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
489 break;
490 case SO_SNDBUF:
491 /* Don't return an error on this; BSD doesn't, and if you think
492 about it, this is right. Otherwise apps have to
493 play 'guess the biggest size' games. RCVBUF/SNDBUF
494 are treated in BSD as hints */
495
496 if (val > sysctl_wmem_max)
497 val = sysctl_wmem_max;
b0573dea 498set_sndbuf:
e71a4783
SH
499 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
500 if ((val * 2) < SOCK_MIN_SNDBUF)
501 sk->sk_sndbuf = SOCK_MIN_SNDBUF;
502 else
503 sk->sk_sndbuf = val * 2;
1da177e4 504
e71a4783
SH
505 /*
506 * Wake up sending tasks if we
507 * upped the value.
508 */
509 sk->sk_write_space(sk);
510 break;
1da177e4 511
e71a4783
SH
512 case SO_SNDBUFFORCE:
513 if (!capable(CAP_NET_ADMIN)) {
514 ret = -EPERM;
515 break;
516 }
517 goto set_sndbuf;
b0573dea 518
e71a4783
SH
519 case SO_RCVBUF:
520 /* Don't return an error on this; BSD doesn't, and if you think
521 about it, this is right. Otherwise apps have to
522 play 'guess the biggest size' games. RCVBUF/SNDBUF
523 are treated in BSD as hints */
4ec93edb 524
e71a4783
SH
525 if (val > sysctl_rmem_max)
526 val = sysctl_rmem_max;
b0573dea 527set_rcvbuf:
e71a4783
SH
528 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
529 /*
530 * We double it on the way in to account for
531 * "struct sk_buff" etc. overhead. Applications
532 * assume that the SO_RCVBUF setting they make will
533 * allow that much actual data to be received on that
534 * socket.
535 *
536 * Applications are unaware that "struct sk_buff" and
537 * other overheads allocate from the receive buffer
538 * during socket buffer allocation.
539 *
540 * And after considering the possible alternatives,
541 * returning the value we actually used in getsockopt
542 * is the most desirable behavior.
543 */
544 if ((val * 2) < SOCK_MIN_RCVBUF)
545 sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
546 else
547 sk->sk_rcvbuf = val * 2;
548 break;
549
550 case SO_RCVBUFFORCE:
551 if (!capable(CAP_NET_ADMIN)) {
552 ret = -EPERM;
1da177e4 553 break;
e71a4783
SH
554 }
555 goto set_rcvbuf;
1da177e4 556
e71a4783 557 case SO_KEEPALIVE:
1da177e4 558#ifdef CONFIG_INET
e71a4783
SH
559 if (sk->sk_protocol == IPPROTO_TCP)
560 tcp_set_keepalive(sk, valbool);
1da177e4 561#endif
e71a4783
SH
562 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
563 break;
564
565 case SO_OOBINLINE:
566 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
567 break;
568
569 case SO_NO_CHECK:
570 sk->sk_no_check = valbool;
571 break;
572
573 case SO_PRIORITY:
574 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
575 sk->sk_priority = val;
576 else
577 ret = -EPERM;
578 break;
579
580 case SO_LINGER:
581 if (optlen < sizeof(ling)) {
582 ret = -EINVAL; /* 1003.1g */
1da177e4 583 break;
e71a4783
SH
584 }
585 if (copy_from_user(&ling,optval,sizeof(ling))) {
586 ret = -EFAULT;
1da177e4 587 break;
e71a4783
SH
588 }
589 if (!ling.l_onoff)
590 sock_reset_flag(sk, SOCK_LINGER);
591 else {
1da177e4 592#if (BITS_PER_LONG == 32)
e71a4783
SH
593 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
594 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
1da177e4 595 else
e71a4783
SH
596#endif
597 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
598 sock_set_flag(sk, SOCK_LINGER);
599 }
600 break;
601
602 case SO_BSDCOMPAT:
603 sock_warn_obsolete_bsdism("setsockopt");
604 break;
605
606 case SO_PASSCRED:
607 if (valbool)
608 set_bit(SOCK_PASSCRED, &sock->flags);
609 else
610 clear_bit(SOCK_PASSCRED, &sock->flags);
611 break;
612
613 case SO_TIMESTAMP:
92f37fd2 614 case SO_TIMESTAMPNS:
e71a4783 615 if (valbool) {
92f37fd2
ED
616 if (optname == SO_TIMESTAMP)
617 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
618 else
619 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
e71a4783 620 sock_set_flag(sk, SOCK_RCVTSTAMP);
20d49473 621 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
92f37fd2 622 } else {
e71a4783 623 sock_reset_flag(sk, SOCK_RCVTSTAMP);
92f37fd2
ED
624 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
625 }
e71a4783
SH
626 break;
627
20d49473
PO
628 case SO_TIMESTAMPING:
629 if (val & ~SOF_TIMESTAMPING_MASK) {
630 ret = -EINVAL;
631 break;
632 }
633 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
634 val & SOF_TIMESTAMPING_TX_HARDWARE);
635 sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
636 val & SOF_TIMESTAMPING_TX_SOFTWARE);
637 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
638 val & SOF_TIMESTAMPING_RX_HARDWARE);
639 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
640 sock_enable_timestamp(sk,
641 SOCK_TIMESTAMPING_RX_SOFTWARE);
642 else
643 sock_disable_timestamp(sk,
644 SOCK_TIMESTAMPING_RX_SOFTWARE);
645 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
646 val & SOF_TIMESTAMPING_SOFTWARE);
647 sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
648 val & SOF_TIMESTAMPING_SYS_HARDWARE);
649 sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
650 val & SOF_TIMESTAMPING_RAW_HARDWARE);
651 break;
652
e71a4783
SH
653 case SO_RCVLOWAT:
654 if (val < 0)
655 val = INT_MAX;
656 sk->sk_rcvlowat = val ? : 1;
657 break;
658
659 case SO_RCVTIMEO:
660 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
661 break;
662
663 case SO_SNDTIMEO:
664 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
665 break;
1da177e4 666
e71a4783
SH
667 case SO_ATTACH_FILTER:
668 ret = -EINVAL;
669 if (optlen == sizeof(struct sock_fprog)) {
670 struct sock_fprog fprog;
1da177e4 671
e71a4783
SH
672 ret = -EFAULT;
673 if (copy_from_user(&fprog, optval, sizeof(fprog)))
1da177e4 674 break;
e71a4783
SH
675
676 ret = sk_attach_filter(&fprog, sk);
677 }
678 break;
679
680 case SO_DETACH_FILTER:
55b33325 681 ret = sk_detach_filter(sk);
e71a4783 682 break;
1da177e4 683
e71a4783
SH
684 case SO_PASSSEC:
685 if (valbool)
686 set_bit(SOCK_PASSSEC, &sock->flags);
687 else
688 clear_bit(SOCK_PASSSEC, &sock->flags);
689 break;
4a19ec58
LAT
690 case SO_MARK:
691 if (!capable(CAP_NET_ADMIN))
692 ret = -EPERM;
693 else {
694 sk->sk_mark = val;
695 }
696 break;
877ce7c1 697
1da177e4
LT
698 /* We implement the SO_SNDLOWAT etc to
699 not be settable (1003.1g 5.3) */
e71a4783
SH
700 default:
701 ret = -ENOPROTOOPT;
702 break;
4ec93edb 703 }
1da177e4
LT
704 release_sock(sk);
705 return ret;
706}
707
708
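The doubling described in the SO_SNDBUF/SO_RCVBUF comments above is observable from user space: the value read back is roughly twice the requested one, subject to the sysctl limits. A hedged illustration:

#include <stdio.h>
#include <sys/socket.h>

/* Hypothetical check: request a 64 KiB receive buffer and read back what
 * the kernel actually booked.  sock_setsockopt() stores val * 2 (after
 * capping val at sysctl_rmem_max), so 'actual' is typically 131072 here.
 */
static void show_rcvbuf_doubling(int fd)
{
        int requested = 65536, actual = 0;
        socklen_t len = sizeof(actual);

        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &requested, sizeof(requested));
        getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &actual, &len);
        printf("asked for %d, kernel reports %d\n", requested, actual);
}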
709int sock_getsockopt(struct socket *sock, int level, int optname,
710 char __user *optval, int __user *optlen)
711{
712 struct sock *sk = sock->sk;
4ec93edb 713
e71a4783 714 union {
4ec93edb
YH
715 int val;
716 struct linger ling;
1da177e4
LT
717 struct timeval tm;
718 } v;
4ec93edb 719
1da177e4
LT
720 unsigned int lv = sizeof(int);
721 int len;
4ec93edb 722
e71a4783 723 if (get_user(len, optlen))
4ec93edb 724 return -EFAULT;
e71a4783 725 if (len < 0)
1da177e4 726 return -EINVAL;
4ec93edb 727
50fee1de 728 memset(&v, 0, sizeof(v));
df0bca04 729
e71a4783
SH
730 switch(optname) {
731 case SO_DEBUG:
732 v.val = sock_flag(sk, SOCK_DBG);
733 break;
734
735 case SO_DONTROUTE:
736 v.val = sock_flag(sk, SOCK_LOCALROUTE);
737 break;
738
739 case SO_BROADCAST:
740 v.val = !!sock_flag(sk, SOCK_BROADCAST);
741 break;
742
743 case SO_SNDBUF:
744 v.val = sk->sk_sndbuf;
745 break;
746
747 case SO_RCVBUF:
748 v.val = sk->sk_rcvbuf;
749 break;
750
751 case SO_REUSEADDR:
752 v.val = sk->sk_reuse;
753 break;
754
755 case SO_KEEPALIVE:
756 v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
757 break;
758
759 case SO_TYPE:
760 v.val = sk->sk_type;
761 break;
762
763 case SO_ERROR:
764 v.val = -sock_error(sk);
765 if (v.val==0)
766 v.val = xchg(&sk->sk_err_soft, 0);
767 break;
768
769 case SO_OOBINLINE:
770 v.val = !!sock_flag(sk, SOCK_URGINLINE);
771 break;
772
773 case SO_NO_CHECK:
774 v.val = sk->sk_no_check;
775 break;
776
777 case SO_PRIORITY:
778 v.val = sk->sk_priority;
779 break;
780
781 case SO_LINGER:
782 lv = sizeof(v.ling);
783 v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
784 v.ling.l_linger = sk->sk_lingertime / HZ;
785 break;
786
787 case SO_BSDCOMPAT:
788 sock_warn_obsolete_bsdism("getsockopt");
789 break;
790
791 case SO_TIMESTAMP:
92f37fd2
ED
792 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
793 !sock_flag(sk, SOCK_RCVTSTAMPNS);
794 break;
795
796 case SO_TIMESTAMPNS:
797 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
e71a4783
SH
798 break;
799
20d49473
PO
800 case SO_TIMESTAMPING:
801 v.val = 0;
802 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
803 v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
804 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
805 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
806 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
807 v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
808 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
809 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
810 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
811 v.val |= SOF_TIMESTAMPING_SOFTWARE;
812 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
813 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
814 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
815 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
816 break;
817
e71a4783
SH
818 case SO_RCVTIMEO:
819 lv=sizeof(struct timeval);
820 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
821 v.tm.tv_sec = 0;
822 v.tm.tv_usec = 0;
823 } else {
824 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
825 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
826 }
827 break;
828
829 case SO_SNDTIMEO:
830 lv=sizeof(struct timeval);
831 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
832 v.tm.tv_sec = 0;
833 v.tm.tv_usec = 0;
834 } else {
835 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
836 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
837 }
838 break;
1da177e4 839
e71a4783
SH
840 case SO_RCVLOWAT:
841 v.val = sk->sk_rcvlowat;
842 break;
1da177e4 843
e71a4783
SH
844 case SO_SNDLOWAT:
845 v.val=1;
846 break;
1da177e4 847
e71a4783
SH
848 case SO_PASSCRED:
849 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
850 break;
1da177e4 851
e71a4783
SH
852 case SO_PEERCRED:
853 if (len > sizeof(sk->sk_peercred))
854 len = sizeof(sk->sk_peercred);
855 if (copy_to_user(optval, &sk->sk_peercred, len))
856 return -EFAULT;
857 goto lenout;
1da177e4 858
e71a4783
SH
859 case SO_PEERNAME:
860 {
861 char address[128];
862
863 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
864 return -ENOTCONN;
865 if (lv < len)
866 return -EINVAL;
867 if (copy_to_user(optval, address, len))
868 return -EFAULT;
869 goto lenout;
870 }
1da177e4 871
e71a4783
SH
872 /* Dubious BSD thing... Probably nobody even uses it, but
873 * the UNIX standard wants it for whatever reason... -DaveM
874 */
875 case SO_ACCEPTCONN:
876 v.val = sk->sk_state == TCP_LISTEN;
877 break;
1da177e4 878
e71a4783
SH
879 case SO_PASSSEC:
880 v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
881 break;
877ce7c1 882
e71a4783
SH
883 case SO_PEERSEC:
884 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1da177e4 885
4a19ec58
LAT
886 case SO_MARK:
887 v.val = sk->sk_mark;
888 break;
889
e71a4783
SH
890 default:
891 return -ENOPROTOOPT;
1da177e4 892 }
e71a4783 893
1da177e4
LT
894 if (len > lv)
895 len = lv;
896 if (copy_to_user(optval, &v, len))
897 return -EFAULT;
898lenout:
4ec93edb
YH
899 if (put_user(len, optlen))
900 return -EFAULT;
901 return 0;
1da177e4
LT
902}
903
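The SO_ERROR case in sock_getsockopt() above returns and clears the pending socket error, which is what the classic non-blocking connect() pattern relies on. A user-space sketch (error handling trimmed):

#include <errno.h>
#include <sys/socket.h>

/* Hypothetical: after a non-blocking connect() returned EINPROGRESS and
 * poll()/select() reported the socket writable, SO_ERROR tells us whether
 * the connection succeeded (0) or why it failed (a positive errno value).
 */
static int connect_result(int fd)
{
        int err = 0;
        socklen_t len = sizeof(err);

        if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
                return -errno;
        return err;
}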
a5b5bb9a
IM
904/*
905 * Initialize an sk_lock.
906 *
907 * (We also register the sk_lock with the lock validator.)
908 */
b6f99a21 909static inline void sock_lock_init(struct sock *sk)
a5b5bb9a 910{
ed07536e
PZ
911 sock_lock_init_class_and_name(sk,
912 af_family_slock_key_strings[sk->sk_family],
913 af_family_slock_keys + sk->sk_family,
914 af_family_key_strings[sk->sk_family],
915 af_family_keys + sk->sk_family);
a5b5bb9a
IM
916}
917
f1a6c4da
PE
918static void sock_copy(struct sock *nsk, const struct sock *osk)
919{
920#ifdef CONFIG_SECURITY_NETWORK
921 void *sptr = nsk->sk_security;
922#endif
923
924 memcpy(nsk, osk, osk->sk_prot->obj_size);
f1a6c4da
PE
925#ifdef CONFIG_SECURITY_NETWORK
926 nsk->sk_security = sptr;
927 security_sk_clone(osk, nsk);
928#endif
929}
930
2e4afe7b
PE
931static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
932 int family)
c308c1b2
PE
933{
934 struct sock *sk;
935 struct kmem_cache *slab;
936
937 slab = prot->slab;
938 if (slab != NULL)
939 sk = kmem_cache_alloc(slab, priority);
940 else
941 sk = kmalloc(prot->obj_size, priority);
942
2e4afe7b 943 if (sk != NULL) {
a98b65a3
VN
944 kmemcheck_annotate_bitfield(sk, flags);
945
2e4afe7b
PE
946 if (security_sk_alloc(sk, family, priority))
947 goto out_free;
948
949 if (!try_module_get(prot->owner))
950 goto out_free_sec;
951 }
952
c308c1b2 953 return sk;
2e4afe7b
PE
954
955out_free_sec:
956 security_sk_free(sk);
957out_free:
958 if (slab != NULL)
959 kmem_cache_free(slab, sk);
960 else
961 kfree(sk);
962 return NULL;
c308c1b2
PE
963}
964
965static void sk_prot_free(struct proto *prot, struct sock *sk)
966{
967 struct kmem_cache *slab;
2e4afe7b 968 struct module *owner;
c308c1b2 969
2e4afe7b 970 owner = prot->owner;
c308c1b2 971 slab = prot->slab;
2e4afe7b
PE
972
973 security_sk_free(sk);
c308c1b2
PE
974 if (slab != NULL)
975 kmem_cache_free(slab, sk);
976 else
977 kfree(sk);
2e4afe7b 978 module_put(owner);
c308c1b2
PE
979}
980
1da177e4
LT
981/**
982 * sk_alloc - All socket objects are allocated here
c4ea43c5 983 * @net: the applicable net namespace
4dc3b16b
PP
984 * @family: protocol family
985 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
986 * @prot: struct proto associated with this new sock instance
1da177e4 987 */
1b8d7ae4 988struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
6257ff21 989 struct proto *prot)
1da177e4 990{
c308c1b2 991 struct sock *sk;
1da177e4 992
154adbc8 993 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1da177e4 994 if (sk) {
154adbc8
PE
995 sk->sk_family = family;
996 /*
997 * See comment in struct sock definition to understand
998 * why we need sk_prot_creator -acme
999 */
1000 sk->sk_prot = sk->sk_prot_creator = prot;
1001 sock_lock_init(sk);
3b1e0a65 1002 sock_net_set(sk, get_net(net));
1da177e4 1003 }
a79af59e 1004
2e4afe7b 1005 return sk;
1da177e4
LT
1006}
1007
1008void sk_free(struct sock *sk)
1009{
1010 struct sk_filter *filter;
1da177e4
LT
1011
1012 if (sk->sk_destruct)
1013 sk->sk_destruct(sk);
1014
fda9ef5d 1015 filter = rcu_dereference(sk->sk_filter);
1da177e4 1016 if (filter) {
309dd5fc 1017 sk_filter_uncharge(sk, filter);
fda9ef5d 1018 rcu_assign_pointer(sk->sk_filter, NULL);
1da177e4
LT
1019 }
1020
20d49473
PO
1021 sock_disable_timestamp(sk, SOCK_TIMESTAMP);
1022 sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);
1da177e4
LT
1023
1024 if (atomic_read(&sk->sk_omem_alloc))
1025 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
0dc47877 1026 __func__, atomic_read(&sk->sk_omem_alloc));
1da177e4 1027
3b1e0a65 1028 put_net(sock_net(sk));
c308c1b2 1029 sk_prot_free(sk->sk_prot_creator, sk);
1da177e4
LT
1030}
1031
edf02087
DL
1032/*
1033 * Last sock_put should drop the reference to sk->sk_net. It has already
1034 * been dropped in sk_change_net. Taking a reference to the stopping namespace
1035 * is not an option.
1036 * Take a reference to the socket to remove it from the hash _alive_ and after that
1037 * destroy it in the context of init_net.
1038 */
1039void sk_release_kernel(struct sock *sk)
1040{
1041 if (sk == NULL || sk->sk_socket == NULL)
1042 return;
1043
1044 sock_hold(sk);
1045 sock_release(sk->sk_socket);
65a18ec5 1046 release_net(sock_net(sk));
3b1e0a65 1047 sock_net_set(sk, get_net(&init_net));
edf02087
DL
1048 sock_put(sk);
1049}
45af1754 1050EXPORT_SYMBOL(sk_release_kernel);
edf02087 1051
dd0fc66f 1052struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
87d11ceb 1053{
8fd1d178 1054 struct sock *newsk;
87d11ceb 1055
8fd1d178 1056 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
87d11ceb
ACM
1057 if (newsk != NULL) {
1058 struct sk_filter *filter;
1059
892c141e 1060 sock_copy(newsk, sk);
87d11ceb
ACM
1061
1062 /* SANITY */
3b1e0a65 1063 get_net(sock_net(newsk));
87d11ceb
ACM
1064 sk_node_init(&newsk->sk_node);
1065 sock_lock_init(newsk);
1066 bh_lock_sock(newsk);
fa438ccf 1067 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
87d11ceb
ACM
1068
1069 atomic_set(&newsk->sk_rmem_alloc, 0);
1070 atomic_set(&newsk->sk_wmem_alloc, 0);
1071 atomic_set(&newsk->sk_omem_alloc, 0);
1072 skb_queue_head_init(&newsk->sk_receive_queue);
1073 skb_queue_head_init(&newsk->sk_write_queue);
97fc2f08
CL
1074#ifdef CONFIG_NET_DMA
1075 skb_queue_head_init(&newsk->sk_async_wait_queue);
1076#endif
87d11ceb
ACM
1077
1078 rwlock_init(&newsk->sk_dst_lock);
1079 rwlock_init(&newsk->sk_callback_lock);
443aef0e
PZ
1080 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1081 af_callback_keys + newsk->sk_family,
1082 af_family_clock_key_strings[newsk->sk_family]);
87d11ceb
ACM
1083
1084 newsk->sk_dst_cache = NULL;
1085 newsk->sk_wmem_queued = 0;
1086 newsk->sk_forward_alloc = 0;
1087 newsk->sk_send_head = NULL;
87d11ceb
ACM
1088 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1089
1090 sock_reset_flag(newsk, SOCK_DONE);
1091 skb_queue_head_init(&newsk->sk_error_queue);
1092
1093 filter = newsk->sk_filter;
1094 if (filter != NULL)
1095 sk_filter_charge(newsk, filter);
1096
1097 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1098 /* It is still a raw copy of the parent, so invalidate
1099 * the destructor and do a plain sk_free() */
1100 newsk->sk_destruct = NULL;
1101 sk_free(newsk);
1102 newsk = NULL;
1103 goto out;
1104 }
1105
1106 newsk->sk_err = 0;
1107 newsk->sk_priority = 0;
1108 atomic_set(&newsk->sk_refcnt, 2);
1109
1110 /*
1111 * Increment the counter in the same struct proto as the master
1112 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1113 * is the same as sk->sk_prot->socks, as this field was copied
1114 * with memcpy).
1115 *
1116 * This _changes_ the previous behaviour, where
1117 * tcp_create_openreq_child was always incrementing the
1118 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1119 * to be taken into account in all callers. -acme
1120 */
1121 sk_refcnt_debug_inc(newsk);
972692e0 1122 sk_set_socket(newsk, NULL);
87d11ceb
ACM
1123 newsk->sk_sleep = NULL;
1124
1125 if (newsk->sk_prot->sockets_allocated)
1748376b 1126 percpu_counter_inc(newsk->sk_prot->sockets_allocated);
87d11ceb
ACM
1127 }
1128out:
1129 return newsk;
1130}
1131
1132EXPORT_SYMBOL_GPL(sk_clone);
1133
9958089a
AK
1134void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1135{
1136 __sk_dst_set(sk, dst);
1137 sk->sk_route_caps = dst->dev->features;
1138 if (sk->sk_route_caps & NETIF_F_GSO)
4fcd6b99 1139 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
9958089a 1140 if (sk_can_gso(sk)) {
82cc1a7a 1141 if (dst->header_len) {
9958089a 1142 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
82cc1a7a 1143 } else {
9958089a 1144 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
82cc1a7a
PWJ
1145 sk->sk_gso_max_size = dst->dev->gso_max_size;
1146 }
9958089a
AK
1147 }
1148}
1149EXPORT_SYMBOL_GPL(sk_setup_caps);
1150
1da177e4
LT
1151void __init sk_init(void)
1152{
1153 if (num_physpages <= 4096) {
1154 sysctl_wmem_max = 32767;
1155 sysctl_rmem_max = 32767;
1156 sysctl_wmem_default = 32767;
1157 sysctl_rmem_default = 32767;
1158 } else if (num_physpages >= 131072) {
1159 sysctl_wmem_max = 131071;
1160 sysctl_rmem_max = 131071;
1161 }
1162}
1163
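A worked note on the thresholds in sk_init(), assuming 4 KiB pages (an assumption, not something this file states):

/* Hypothetical figures with 4 KiB pages:
 *
 *   num_physpages <= 4096   -> at most ~16 MiB of RAM: clamp all four
 *                              defaults/maxima to 32767 bytes.
 *   num_physpages >= 131072 -> at least ~512 MiB of RAM: raise only the
 *                              two maxima to 131071 bytes.
 *
 * Machines in between keep the SK_WMEM_MAX/SK_RMEM_MAX values computed
 * from _SK_MEM_OVERHEAD above.
 */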
1164/*
1165 * Simple resource managers for sockets.
1166 */
1167
1168
4ec93edb
YH
1169/*
1170 * Write buffer destructor automatically called from kfree_skb.
1da177e4
LT
1171 */
1172void sock_wfree(struct sk_buff *skb)
1173{
1174 struct sock *sk = skb->sk;
1175
1176 /* In case it might be waiting for more memory. */
1177 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
1178 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
1179 sk->sk_write_space(sk);
1180 sock_put(sk);
1181}
1182
4ec93edb
YH
1183/*
1184 * Read buffer destructor automatically called from kfree_skb.
1da177e4
LT
1185 */
1186void sock_rfree(struct sk_buff *skb)
1187{
1188 struct sock *sk = skb->sk;
1189
1190 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3ab224be 1191 sk_mem_uncharge(skb->sk, skb->truesize);
1da177e4
LT
1192}
1193
1194
1195int sock_i_uid(struct sock *sk)
1196{
1197 int uid;
1198
1199 read_lock(&sk->sk_callback_lock);
1200 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1201 read_unlock(&sk->sk_callback_lock);
1202 return uid;
1203}
1204
1205unsigned long sock_i_ino(struct sock *sk)
1206{
1207 unsigned long ino;
1208
1209 read_lock(&sk->sk_callback_lock);
1210 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1211 read_unlock(&sk->sk_callback_lock);
1212 return ino;
1213}
1214
1215/*
1216 * Allocate a skb from the socket's send buffer.
1217 */
86a76caf 1218struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
dd0fc66f 1219 gfp_t priority)
1da177e4
LT
1220{
1221 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1222 struct sk_buff * skb = alloc_skb(size, priority);
1223 if (skb) {
1224 skb_set_owner_w(skb, sk);
1225 return skb;
1226 }
1227 }
1228 return NULL;
1229}
1230
1231/*
1232 * Allocate a skb from the socket's receive buffer.
4ec93edb 1233 */
86a76caf 1234struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
dd0fc66f 1235 gfp_t priority)
1da177e4
LT
1236{
1237 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1238 struct sk_buff *skb = alloc_skb(size, priority);
1239 if (skb) {
1240 skb_set_owner_r(skb, sk);
1241 return skb;
1242 }
1243 }
1244 return NULL;
1245}
1246
4ec93edb 1247/*
1da177e4 1248 * Allocate a memory block from the socket's option memory buffer.
4ec93edb 1249 */
dd0fc66f 1250void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1da177e4
LT
1251{
1252 if ((unsigned)size <= sysctl_optmem_max &&
1253 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1254 void *mem;
1255 /* First do the add, to avoid the race if kmalloc
4ec93edb 1256 * might sleep.
1da177e4
LT
1257 */
1258 atomic_add(size, &sk->sk_omem_alloc);
1259 mem = kmalloc(size, priority);
1260 if (mem)
1261 return mem;
1262 atomic_sub(size, &sk->sk_omem_alloc);
1263 }
1264 return NULL;
1265}
1266
1267/*
1268 * Free an option memory block.
1269 */
1270void sock_kfree_s(struct sock *sk, void *mem, int size)
1271{
1272 kfree(mem);
1273 atomic_sub(size, &sk->sk_omem_alloc);
1274}
1275
1276/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1277 I think these locks should be removed for datagram sockets.
1278 */
1279static long sock_wait_for_wmem(struct sock * sk, long timeo)
1280{
1281 DEFINE_WAIT(wait);
1282
1283 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1284 for (;;) {
1285 if (!timeo)
1286 break;
1287 if (signal_pending(current))
1288 break;
1289 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1290 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1291 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1292 break;
1293 if (sk->sk_shutdown & SEND_SHUTDOWN)
1294 break;
1295 if (sk->sk_err)
1296 break;
1297 timeo = schedule_timeout(timeo);
1298 }
1299 finish_wait(sk->sk_sleep, &wait);
1300 return timeo;
1301}
1302
1303
1304/*
1305 * Generic send/receive buffer handlers
1306 */
1307
4cc7f68d
HX
1308struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1309 unsigned long data_len, int noblock,
1310 int *errcode)
1da177e4
LT
1311{
1312 struct sk_buff *skb;
7d877f3b 1313 gfp_t gfp_mask;
1da177e4
LT
1314 long timeo;
1315 int err;
1316
1317 gfp_mask = sk->sk_allocation;
1318 if (gfp_mask & __GFP_WAIT)
1319 gfp_mask |= __GFP_REPEAT;
1320
1321 timeo = sock_sndtimeo(sk, noblock);
1322 while (1) {
1323 err = sock_error(sk);
1324 if (err != 0)
1325 goto failure;
1326
1327 err = -EPIPE;
1328 if (sk->sk_shutdown & SEND_SHUTDOWN)
1329 goto failure;
1330
1331 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
db38c179 1332 skb = alloc_skb(header_len, gfp_mask);
1da177e4
LT
1333 if (skb) {
1334 int npages;
1335 int i;
1336
1337 /* No pages, we're done... */
1338 if (!data_len)
1339 break;
1340
1341 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1342 skb->truesize += data_len;
1343 skb_shinfo(skb)->nr_frags = npages;
1344 for (i = 0; i < npages; i++) {
1345 struct page *page;
1346 skb_frag_t *frag;
1347
1348 page = alloc_pages(sk->sk_allocation, 0);
1349 if (!page) {
1350 err = -ENOBUFS;
1351 skb_shinfo(skb)->nr_frags = i;
1352 kfree_skb(skb);
1353 goto failure;
1354 }
1355
1356 frag = &skb_shinfo(skb)->frags[i];
1357 frag->page = page;
1358 frag->page_offset = 0;
1359 frag->size = (data_len >= PAGE_SIZE ?
1360 PAGE_SIZE :
1361 data_len);
1362 data_len -= PAGE_SIZE;
1363 }
1364
1365 /* Full success... */
1366 break;
1367 }
1368 err = -ENOBUFS;
1369 goto failure;
1370 }
1371 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1372 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1373 err = -EAGAIN;
1374 if (!timeo)
1375 goto failure;
1376 if (signal_pending(current))
1377 goto interrupted;
1378 timeo = sock_wait_for_wmem(sk, timeo);
1379 }
1380
1381 skb_set_owner_w(skb, sk);
1382 return skb;
1383
1384interrupted:
1385 err = sock_intr_errno(timeo);
1386failure:
1387 *errcode = err;
1388 return NULL;
1389}
4cc7f68d 1390EXPORT_SYMBOL(sock_alloc_send_pskb);
1da177e4 1391
4ec93edb 1392struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1da177e4
LT
1393 int noblock, int *errcode)
1394{
1395 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1396}
1397
1398static void __lock_sock(struct sock *sk)
1399{
1400 DEFINE_WAIT(wait);
1401
e71a4783 1402 for (;;) {
1da177e4
LT
1403 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1404 TASK_UNINTERRUPTIBLE);
1405 spin_unlock_bh(&sk->sk_lock.slock);
1406 schedule();
1407 spin_lock_bh(&sk->sk_lock.slock);
e71a4783 1408 if (!sock_owned_by_user(sk))
1da177e4
LT
1409 break;
1410 }
1411 finish_wait(&sk->sk_lock.wq, &wait);
1412}
1413
1414static void __release_sock(struct sock *sk)
1415{
1416 struct sk_buff *skb = sk->sk_backlog.head;
1417
1418 do {
1419 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1420 bh_unlock_sock(sk);
1421
1422 do {
1423 struct sk_buff *next = skb->next;
1424
1425 skb->next = NULL;
c57943a1 1426 sk_backlog_rcv(sk, skb);
1da177e4
LT
1427
1428 /*
1429 * We are in process context here with softirqs
1430 * disabled, use cond_resched_softirq() to preempt.
1431 * This is safe to do because we've taken the backlog
1432 * queue private:
1433 */
1434 cond_resched_softirq();
1435
1436 skb = next;
1437 } while (skb != NULL);
1438
1439 bh_lock_sock(sk);
e71a4783 1440 } while ((skb = sk->sk_backlog.head) != NULL);
1da177e4
LT
1441}
1442
1443/**
1444 * sk_wait_data - wait for data to arrive at sk_receive_queue
4dc3b16b
PP
1445 * @sk: sock to wait on
1446 * @timeo: for how long
1da177e4
LT
1447 *
1448 * Now socket state including sk->sk_err is changed only under lock,
1449 * hence we may omit checks after joining wait queue.
1450 * We check receive queue before schedule() only as optimization;
1451 * it is very likely that release_sock() added new data.
1452 */
1453int sk_wait_data(struct sock *sk, long *timeo)
1454{
1455 int rc;
1456 DEFINE_WAIT(wait);
1457
1458 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1459 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1460 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1461 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1462 finish_wait(sk->sk_sleep, &wait);
1463 return rc;
1464}
1465
1466EXPORT_SYMBOL(sk_wait_data);
1467
3ab224be
HA
1468/**
1469 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1470 * @sk: socket
1471 * @size: memory size to allocate
1472 * @kind: allocation type
1473 *
1474 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1475 * rmem allocation. This function assumes that protocols which have
1476 * memory_pressure use sk_wmem_queued as write buffer accounting.
1477 */
1478int __sk_mem_schedule(struct sock *sk, int size, int kind)
1479{
1480 struct proto *prot = sk->sk_prot;
1481 int amt = sk_mem_pages(size);
1482 int allocated;
1483
1484 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1485 allocated = atomic_add_return(amt, prot->memory_allocated);
1486
1487 /* Under limit. */
1488 if (allocated <= prot->sysctl_mem[0]) {
1489 if (prot->memory_pressure && *prot->memory_pressure)
1490 *prot->memory_pressure = 0;
1491 return 1;
1492 }
1493
1494 /* Under pressure. */
1495 if (allocated > prot->sysctl_mem[1])
1496 if (prot->enter_memory_pressure)
5c52ba17 1497 prot->enter_memory_pressure(sk);
3ab224be
HA
1498
1499 /* Over hard limit. */
1500 if (allocated > prot->sysctl_mem[2])
1501 goto suppress_allocation;
1502
1503 /* guarantee minimum buffer size under pressure */
1504 if (kind == SK_MEM_RECV) {
1505 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1506 return 1;
1507 } else { /* SK_MEM_SEND */
1508 if (sk->sk_type == SOCK_STREAM) {
1509 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1510 return 1;
1511 } else if (atomic_read(&sk->sk_wmem_alloc) <
1512 prot->sysctl_wmem[0])
1513 return 1;
1514 }
1515
1516 if (prot->memory_pressure) {
1748376b
ED
1517 int alloc;
1518
1519 if (!*prot->memory_pressure)
1520 return 1;
1521 alloc = percpu_counter_read_positive(prot->sockets_allocated);
1522 if (prot->sysctl_mem[2] > alloc *
3ab224be
HA
1523 sk_mem_pages(sk->sk_wmem_queued +
1524 atomic_read(&sk->sk_rmem_alloc) +
1525 sk->sk_forward_alloc))
1526 return 1;
1527 }
1528
1529suppress_allocation:
1530
1531 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1532 sk_stream_moderate_sndbuf(sk);
1533
1534 /* Fail only if socket is _under_ its sndbuf.
1535 * In this case we cannot block, so that we have to fail.
1536 */
1537 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1538 return 1;
1539 }
1540
1541 /* Alas. Undo changes. */
1542 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1543 atomic_sub(amt, prot->memory_allocated);
1544 return 0;
1545}
1546
1547EXPORT_SYMBOL(__sk_mem_schedule);
1548
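To make the accounting in __sk_mem_schedule() concrete, a small worked example; it assumes SK_MEM_QUANTUM equals the page size (4096 bytes here), so treat the numbers as illustrative:

/* Hypothetical: a protocol asks to charge 6000 bytes of receive memory.
 *
 *   amt       = sk_mem_pages(6000)   = 2 quanta
 *   charge    = amt * SK_MEM_QUANTUM = 8192 bytes added to sk_forward_alloc
 *   allocated = memory_allocated + 2 (in quanta, compared against
 *               prot->sysctl_mem[0..2])
 *
 * Below sysctl_mem[0] the charge always succeeds; above sysctl_mem[1] the
 * protocol is pushed into memory pressure; above sysctl_mem[2] the charge
 * is normally undone and 0 is returned (stream senders still under their
 * sndbuf are the main exception).
 */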
1549/**
1550 * __sk_mem_reclaim - reclaim memory_allocated
1551 * @sk: socket
1552 */
1553void __sk_mem_reclaim(struct sock *sk)
1554{
1555 struct proto *prot = sk->sk_prot;
1556
680a5a50 1557 atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
3ab224be
HA
1558 prot->memory_allocated);
1559 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1560
1561 if (prot->memory_pressure && *prot->memory_pressure &&
1562 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1563 *prot->memory_pressure = 0;
1564}
1565
1566EXPORT_SYMBOL(__sk_mem_reclaim);
1567
1568
1da177e4
LT
1569/*
1570 * Set of default routines for initialising struct proto_ops when
1571 * the protocol does not support a particular function. In certain
1572 * cases where it makes no sense for a protocol to have a "do nothing"
1573 * function, some default processing is provided.
1574 */
1575
1576int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1577{
1578 return -EOPNOTSUPP;
1579}
1580
4ec93edb 1581int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1da177e4
LT
1582 int len, int flags)
1583{
1584 return -EOPNOTSUPP;
1585}
1586
1587int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1588{
1589 return -EOPNOTSUPP;
1590}
1591
1592int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1593{
1594 return -EOPNOTSUPP;
1595}
1596
4ec93edb 1597int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1da177e4
LT
1598 int *len, int peer)
1599{
1600 return -EOPNOTSUPP;
1601}
1602
1603unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
1604{
1605 return 0;
1606}
1607
1608int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1609{
1610 return -EOPNOTSUPP;
1611}
1612
1613int sock_no_listen(struct socket *sock, int backlog)
1614{
1615 return -EOPNOTSUPP;
1616}
1617
1618int sock_no_shutdown(struct socket *sock, int how)
1619{
1620 return -EOPNOTSUPP;
1621}
1622
1623int sock_no_setsockopt(struct socket *sock, int level, int optname,
1624 char __user *optval, int optlen)
1625{
1626 return -EOPNOTSUPP;
1627}
1628
1629int sock_no_getsockopt(struct socket *sock, int level, int optname,
1630 char __user *optval, int __user *optlen)
1631{
1632 return -EOPNOTSUPP;
1633}
1634
1635int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1636 size_t len)
1637{
1638 return -EOPNOTSUPP;
1639}
1640
1641int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1642 size_t len, int flags)
1643{
1644 return -EOPNOTSUPP;
1645}
1646
1647int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1648{
1649 /* Mirror missing mmap method error code */
1650 return -ENODEV;
1651}
1652
1653ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1654{
1655 ssize_t res;
1656 struct msghdr msg = {.msg_flags = flags};
1657 struct kvec iov;
1658 char *kaddr = kmap(page);
1659 iov.iov_base = kaddr + offset;
1660 iov.iov_len = size;
1661 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1662 kunmap(page);
1663 return res;
1664}
1665
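The sock_no_*() stubs above are intended to be dropped straight into a protocol's proto_ops table for the operations it does not support. A hypothetical (not in-tree) family that only implements its own release/sendmsg/recvmsg might wire things up like this; the example_* names are placeholders:

static const struct proto_ops example_ops = {
        .family     = PF_UNSPEC,           /* placeholder family */
        .owner      = THIS_MODULE,
        .release    = example_release,     /* hypothetical */
        .bind       = sock_no_bind,
        .connect    = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept     = sock_no_accept,
        .getname    = sock_no_getname,
        .poll       = sock_no_poll,
        .ioctl      = sock_no_ioctl,
        .listen     = sock_no_listen,
        .shutdown   = sock_no_shutdown,
        .setsockopt = sock_no_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg    = example_sendmsg,     /* hypothetical */
        .recvmsg    = example_recvmsg,     /* hypothetical */
        .mmap       = sock_no_mmap,
        .sendpage   = sock_no_sendpage,
};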
1666/*
1667 * Default Socket Callbacks
1668 */
1669
1670static void sock_def_wakeup(struct sock *sk)
1671{
1672 read_lock(&sk->sk_callback_lock);
1673 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1674 wake_up_interruptible_all(sk->sk_sleep);
1675 read_unlock(&sk->sk_callback_lock);
1676}
1677
1678static void sock_def_error_report(struct sock *sk)
1679{
1680 read_lock(&sk->sk_callback_lock);
1681 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
37e5540b 1682 wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
8d8ad9d7 1683 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
1da177e4
LT
1684 read_unlock(&sk->sk_callback_lock);
1685}
1686
1687static void sock_def_readable(struct sock *sk, int len)
1688{
1689 read_lock(&sk->sk_callback_lock);
1690 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
37e5540b
DL
1691 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
1692 POLLRDNORM | POLLRDBAND);
8d8ad9d7 1693 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1da177e4
LT
1694 read_unlock(&sk->sk_callback_lock);
1695}
1696
1697static void sock_def_write_space(struct sock *sk)
1698{
1699 read_lock(&sk->sk_callback_lock);
1700
1701 /* Do not wake up a writer until he can make "significant"
1702 * progress. --DaveM
1703 */
e71a4783 1704 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
1da177e4 1705 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
37e5540b
DL
1706 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
1707 POLLWRNORM | POLLWRBAND);
1da177e4
LT
1708
1709 /* Should agree with poll, otherwise some programs break */
1710 if (sock_writeable(sk))
8d8ad9d7 1711 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1da177e4
LT
1712 }
1713
1714 read_unlock(&sk->sk_callback_lock);
1715}
1716
1717static void sock_def_destruct(struct sock *sk)
1718{
a51482bd 1719 kfree(sk->sk_protinfo);
1da177e4
LT
1720}
1721
1722void sk_send_sigurg(struct sock *sk)
1723{
1724 if (sk->sk_socket && sk->sk_socket->file)
1725 if (send_sigurg(&sk->sk_socket->file->f_owner))
8d8ad9d7 1726 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
1da177e4
LT
1727}
1728
1729void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1730 unsigned long expires)
1731{
1732 if (!mod_timer(timer, expires))
1733 sock_hold(sk);
1734}
1735
1736EXPORT_SYMBOL(sk_reset_timer);
1737
1738void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1739{
1740 if (timer_pending(timer) && del_timer(timer))
1741 __sock_put(sk);
1742}
1743
1744EXPORT_SYMBOL(sk_stop_timer);
1745
1746void sock_init_data(struct socket *sock, struct sock *sk)
1747{
1748 skb_queue_head_init(&sk->sk_receive_queue);
1749 skb_queue_head_init(&sk->sk_write_queue);
1750 skb_queue_head_init(&sk->sk_error_queue);
97fc2f08
CL
1751#ifdef CONFIG_NET_DMA
1752 skb_queue_head_init(&sk->sk_async_wait_queue);
1753#endif
1da177e4
LT
1754
1755 sk->sk_send_head = NULL;
1756
1757 init_timer(&sk->sk_timer);
4ec93edb 1758
1da177e4
LT
1759 sk->sk_allocation = GFP_KERNEL;
1760 sk->sk_rcvbuf = sysctl_rmem_default;
1761 sk->sk_sndbuf = sysctl_wmem_default;
1762 sk->sk_state = TCP_CLOSE;
972692e0 1763 sk_set_socket(sk, sock);
1da177e4
LT
1764
1765 sock_set_flag(sk, SOCK_ZAPPED);
1766
e71a4783 1767 if (sock) {
1da177e4
LT
1768 sk->sk_type = sock->type;
1769 sk->sk_sleep = &sock->wait;
1770 sock->sk = sk;
1771 } else
1772 sk->sk_sleep = NULL;
1773
1774 rwlock_init(&sk->sk_dst_lock);
1775 rwlock_init(&sk->sk_callback_lock);
443aef0e
PZ
1776 lockdep_set_class_and_name(&sk->sk_callback_lock,
1777 af_callback_keys + sk->sk_family,
1778 af_family_clock_key_strings[sk->sk_family]);
1da177e4
LT
1779
1780 sk->sk_state_change = sock_def_wakeup;
1781 sk->sk_data_ready = sock_def_readable;
1782 sk->sk_write_space = sock_def_write_space;
1783 sk->sk_error_report = sock_def_error_report;
1784 sk->sk_destruct = sock_def_destruct;
1785
1786 sk->sk_sndmsg_page = NULL;
1787 sk->sk_sndmsg_off = 0;
1788
1789 sk->sk_peercred.pid = 0;
1790 sk->sk_peercred.uid = -1;
1791 sk->sk_peercred.gid = -1;
1792 sk->sk_write_pending = 0;
1793 sk->sk_rcvlowat = 1;
1794 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1795 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1796
f37f0afb 1797 sk->sk_stamp = ktime_set(-1L, 0);
1da177e4
LT
1798
1799 atomic_set(&sk->sk_refcnt, 1);
33c732c3 1800 atomic_set(&sk->sk_drops, 0);
1da177e4
LT
1801}
1802
b5606c2d 1803void lock_sock_nested(struct sock *sk, int subclass)
1da177e4
LT
1804{
1805 might_sleep();
a5b5bb9a 1806 spin_lock_bh(&sk->sk_lock.slock);
d2e9117c 1807 if (sk->sk_lock.owned)
1da177e4 1808 __lock_sock(sk);
d2e9117c 1809 sk->sk_lock.owned = 1;
a5b5bb9a
IM
1810 spin_unlock(&sk->sk_lock.slock);
1811 /*
1812 * The sk_lock has mutex_lock() semantics here:
1813 */
fcc70d5f 1814 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
a5b5bb9a 1815 local_bh_enable();
1da177e4
LT
1816}
1817
fcc70d5f 1818EXPORT_SYMBOL(lock_sock_nested);
1da177e4 1819
b5606c2d 1820void release_sock(struct sock *sk)
1da177e4 1821{
a5b5bb9a
IM
1822 /*
1823 * The sk_lock has mutex_unlock() semantics:
1824 */
1825 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
1826
1827 spin_lock_bh(&sk->sk_lock.slock);
1da177e4
LT
1828 if (sk->sk_backlog.tail)
1829 __release_sock(sk);
d2e9117c 1830 sk->sk_lock.owned = 0;
a5b5bb9a
IM
1831 if (waitqueue_active(&sk->sk_lock.wq))
1832 wake_up(&sk->sk_lock.wq);
1833 spin_unlock_bh(&sk->sk_lock.slock);
1da177e4
LT
1834}
1835EXPORT_SYMBOL(release_sock);
1836
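lock_sock()/release_sock() above give process-context code mutex-like ownership of the socket while softirq input keeps queueing packets to the backlog; the usual calling pattern in a protocol looks roughly like the following sketch (generic, not a specific in-tree function):

/* Hypothetical protocol helper: mutate socket state safely from process
 * context.  Whatever the softirq receive path queued to the backlog while
 * we owned the lock is processed by __release_sock() on the way out.
 */
static void example_set_state(struct sock *sk, int state)
{
        lock_sock(sk);          /* may sleep; takes ownership */
        sk->sk_state = state;   /* protected section */
        release_sock(sk);       /* runs the backlog, wakes lock waiters */
}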
1837int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
4ec93edb 1838{
b7aa0bf7 1839 struct timeval tv;
1da177e4 1840 if (!sock_flag(sk, SOCK_TIMESTAMP))
20d49473 1841 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
b7aa0bf7
ED
1842 tv = ktime_to_timeval(sk->sk_stamp);
1843 if (tv.tv_sec == -1)
1da177e4 1844 return -ENOENT;
b7aa0bf7
ED
1845 if (tv.tv_sec == 0) {
1846 sk->sk_stamp = ktime_get_real();
1847 tv = ktime_to_timeval(sk->sk_stamp);
1848 }
1849 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
4ec93edb 1850}
1da177e4
LT
1851EXPORT_SYMBOL(sock_get_timestamp);
1852
ae40eb1e
ED
1853int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
1854{
1855 struct timespec ts;
1856 if (!sock_flag(sk, SOCK_TIMESTAMP))
20d49473 1857 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
ae40eb1e
ED
1858 ts = ktime_to_timespec(sk->sk_stamp);
1859 if (ts.tv_sec == -1)
1860 return -ENOENT;
1861 if (ts.tv_sec == 0) {
1862 sk->sk_stamp = ktime_get_real();
1863 ts = ktime_to_timespec(sk->sk_stamp);
1864 }
1865 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
1866}
1867EXPORT_SYMBOL(sock_get_timestampns);
1868
20d49473 1869void sock_enable_timestamp(struct sock *sk, int flag)
4ec93edb 1870{
20d49473
PO
1871 if (!sock_flag(sk, flag)) {
1872 sock_set_flag(sk, flag);
1873 /*
1874 * we just set one of the two flags which require net
1875 * time stamping, but time stamping might have been on
1876 * already because of the other one
1877 */
1878 if (!sock_flag(sk,
1879 flag == SOCK_TIMESTAMP ?
1880 SOCK_TIMESTAMPING_RX_SOFTWARE :
1881 SOCK_TIMESTAMP))
1882 net_enable_timestamp();
1da177e4
LT
1883 }
1884}
1da177e4
LT
1885
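sock_enable_timestamp() above backs SO_TIMESTAMP; once the flag is set, received datagrams carry an SCM_TIMESTAMP control message. A condensed user-space sketch (buffer sizes and error handling are illustrative only):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>

/* Hypothetical: enable SO_TIMESTAMP and read the kernel receive timestamp
 * from the ancillary data of a single datagram.
 */
static void print_rx_timestamp(int fd)
{
        int on = 1;
        char data[2048], ctrl[512];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cmsg;

        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
        if (recvmsg(fd, &msg, 0) < 0)
                return;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_TIMESTAMP) {
                        struct timeval tv;

                        memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
                        printf("received at %ld.%06ld\n",
                               (long)tv.tv_sec, (long)tv.tv_usec);
                }
        }
}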
1886/*
1887 * Get a socket option on a socket.
1888 *
1889 * FIX: POSIX 1003.1g is very ambiguous here. It states that
1890 * asynchronous errors should be reported by getsockopt. We assume
1891 * this means if you specify SO_ERROR (otherwise what's the point of it).
1892 */
1893int sock_common_getsockopt(struct socket *sock, int level, int optname,
1894 char __user *optval, int __user *optlen)
1895{
1896 struct sock *sk = sock->sk;
1897
1898 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1899}
1900
1901EXPORT_SYMBOL(sock_common_getsockopt);
1902
3fdadf7d 1903#ifdef CONFIG_COMPAT
1904int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
1905 char __user *optval, int __user *optlen)
1906{
1907 struct sock *sk = sock->sk;
1908
1e51f951 1909 if (sk->sk_prot->compat_getsockopt != NULL)
1910 return sk->sk_prot->compat_getsockopt(sk, level, optname,
1911 optval, optlen);
1912 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1913}
1914EXPORT_SYMBOL(compat_sock_common_getsockopt);
1915#endif
1916
1917int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1918 struct msghdr *msg, size_t size, int flags)
1919{
1920 struct sock *sk = sock->sk;
1921 int addr_len = 0;
1922 int err;
1923
1924 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
1925 flags & ~MSG_DONTWAIT, &addr_len);
1926 if (err >= 0)
1927 msg->msg_namelen = addr_len;
1928 return err;
1929}
1930
1931EXPORT_SYMBOL(sock_common_recvmsg);
1932
1933/*
1934 * Set socket options on an inet socket.
1935 */
1936int sock_common_setsockopt(struct socket *sock, int level, int optname,
1937 char __user *optval, int optlen)
1938{
1939 struct sock *sk = sock->sk;
1940
1941 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1942}
1943
1944EXPORT_SYMBOL(sock_common_setsockopt);
1945
3fdadf7d 1946#ifdef CONFIG_COMPAT
1947int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
1948 char __user *optval, int optlen)
1949{
1950 struct sock *sk = sock->sk;
1951
1952 if (sk->sk_prot->compat_setsockopt != NULL)
1953 return sk->sk_prot->compat_setsockopt(sk, level, optname,
1954 optval, optlen);
1955 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1956}
1957EXPORT_SYMBOL(compat_sock_common_setsockopt);
1958#endif
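/*
 * Illustrative sketch, not part of this file: protocols whose struct proto
 * already implements setsockopt/getsockopt/recvmsg can plug these generic
 * wrappers straight into their struct proto_ops (a partial, hypothetical
 * example):
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family     = PF_INET,
 *		.owner      = THIS_MODULE,
 *		.setsockopt = sock_common_setsockopt,
 *		.getsockopt = sock_common_getsockopt,
 *		.recvmsg    = sock_common_recvmsg,
 *		...
 *	};
 */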
1959
1960void sk_common_release(struct sock *sk)
1961{
1962 if (sk->sk_prot->destroy)
1963 sk->sk_prot->destroy(sk);
1964
1965 /*
1966 * Observation: when sk_common_release is called, processes no longer
1967 * have access to the socket, but the network stack still does.
1968 * Step one, detach it from networking:
1969 *
1970 * A. Remove from hash tables.
1971 */
1972
1973 sk->sk_prot->unhash(sk);
1974
1975 /*
1976 * At this point the socket cannot receive new packets, but it is possible
1977 * that some packets are still in flight because some CPU is running the
1978 * receiver and did its hash table lookup before we unhashed the socket.
1979 * They will reach the receive queue and be purged by the socket destructor.
1980 *
1981 * We also still have packets pending on the receive queue and probably
1982 * our own packets waiting in device queues. sock_destroy will drain the
1983 * receive queue, but transmitted packets will delay socket destruction
1984 * until the last reference is released.
1985 */
1986
1987 sock_orphan(sk);
1988
1989 xfrm_sk_free_policy(sk);
1990
e6848976 1991 sk_refcnt_debug_release(sk);
1992 sock_put(sk);
1993}
1994
1995EXPORT_SYMBOL(sk_common_release);
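/*
 * Illustrative sketch, not part of this file: datagram-style protocols
 * typically call this from (or use it directly as) their struct proto
 * ->close handler (the function name below is hypothetical):
 *
 *	static void example_close(struct sock *sk, long timeout)
 *	{
 *		sk_common_release(sk);
 *	}
 */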
1996
1997static DEFINE_RWLOCK(proto_list_lock);
1998static LIST_HEAD(proto_list);
1999
2000#ifdef CONFIG_PROC_FS
2001#define PROTO_INUSE_NR 64 /* should be enough for the first time */
2002struct prot_inuse {
2003 int val[PROTO_INUSE_NR];
2004};
2005
2006static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2007
2008#ifdef CONFIG_NET_NS
2009void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2010{
2011 int cpu = smp_processor_id();
2012 per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
2013}
2014EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2015
2016int sock_prot_inuse_get(struct net *net, struct proto *prot)
2017{
2018 int cpu, idx = prot->inuse_idx;
2019 int res = 0;
2020
2021 for_each_possible_cpu(cpu)
2022 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2023
2024 return res >= 0 ? res : 0;
2025}
2026EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2027
2028static int sock_inuse_init_net(struct net *net)
2029{
2030 net->core.inuse = alloc_percpu(struct prot_inuse);
2031 return net->core.inuse ? 0 : -ENOMEM;
2032}
2033
2034static void sock_inuse_exit_net(struct net *net)
2035{
2036 free_percpu(net->core.inuse);
2037}
2038
2039static struct pernet_operations net_inuse_ops = {
2040 .init = sock_inuse_init_net,
2041 .exit = sock_inuse_exit_net,
2042};
2043
2044static __init int net_inuse_init(void)
2045{
2046 if (register_pernet_subsys(&net_inuse_ops))
2047 panic("Cannot initialize net inuse counters");
2048
2049 return 0;
2050}
2051
2052core_initcall(net_inuse_init);
2053#else
2054static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2055
c29a0bc4 2056void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2057{
2058 __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
2059}
2060EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2061
c29a0bc4 2062int sock_prot_inuse_get(struct net *net, struct proto *prot)
2063{
2064 int cpu, idx = prot->inuse_idx;
2065 int res = 0;
2066
2067 for_each_possible_cpu(cpu)
2068 res += per_cpu(prot_inuse, cpu).val[idx];
2069
2070 return res >= 0 ? res : 0;
2071}
2072EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
70ee1159 2073#endif
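/*
 * Illustrative sketch, not part of this file: a protocol's hash/unhash
 * callbacks are expected to keep these counters in step with its hash
 * tables, e.g.
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	when hashing
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	when unhashing
 *
 * and /proc/net/protocols reports the per-namespace sum through
 * sock_prot_inuse_get().
 */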
2074
2075static void assign_proto_idx(struct proto *prot)
2076{
2077 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2078
2079 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2080 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2081 return;
2082 }
2083
2084 set_bit(prot->inuse_idx, proto_inuse_idx);
2085}
2086
2087static void release_proto_idx(struct proto *prot)
2088{
2089 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2090 clear_bit(prot->inuse_idx, proto_inuse_idx);
2091}
2092#else
2093static inline void assign_proto_idx(struct proto *prot)
2094{
2095}
2096
2097static inline void release_proto_idx(struct proto *prot)
2098{
2099}
2100#endif
2101
2102int proto_register(struct proto *prot, int alloc_slab)
2103{
2104 if (alloc_slab) {
2105 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2106 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2107 NULL);
2108
2109 if (prot->slab == NULL) {
2110 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2111 prot->name);
60e7663d 2112 goto out;
1da177e4 2113 }
2114
2115 if (prot->rsk_prot != NULL) {
2116 static const char mask[] = "request_sock_%s";
2117
2118 prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
2119 if (prot->rsk_prot->slab_name == NULL)
2120 goto out_free_sock_slab;
2121
2122 sprintf(prot->rsk_prot->slab_name, mask, prot->name);
2123 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2e6599cb 2124 prot->rsk_prot->obj_size, 0,
20c2df83 2125 SLAB_HWCACHE_ALIGN, NULL);
2126
2127 if (prot->rsk_prot->slab == NULL) {
2128 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2129 prot->name);
2130 goto out_free_request_sock_slab_name;
2131 }
2132 }
8feaf0c0 2133
6d6ee43e 2134 if (prot->twsk_prot != NULL) {
2135 static const char mask[] = "tw_sock_%s";
2136
7e56b5d6 2137 prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
8feaf0c0 2138
7e56b5d6 2139 if (prot->twsk_prot->twsk_slab_name == NULL)
2140 goto out_free_request_sock_slab;
2141
7e56b5d6 2142 sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
6d6ee43e 2143 prot->twsk_prot->twsk_slab =
7e56b5d6 2144 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
6d6ee43e 2145 prot->twsk_prot->twsk_obj_size,
2146 0,
2147 SLAB_HWCACHE_ALIGN |
2148 prot->slab_flags,
20c2df83 2149 NULL);
6d6ee43e 2150 if (prot->twsk_prot->twsk_slab == NULL)
2151 goto out_free_timewait_sock_slab_name;
2152 }
2153 }
2154
2a278051 2155 write_lock(&proto_list_lock);
1da177e4 2156 list_add(&prot->node, &proto_list);
13ff3d6f 2157 assign_proto_idx(prot);
1da177e4 2158 write_unlock(&proto_list_lock);
2159 return 0;
2160
8feaf0c0 2161out_free_timewait_sock_slab_name:
7e56b5d6 2162 kfree(prot->twsk_prot->twsk_slab_name);
2163out_free_request_sock_slab:
2164 if (prot->rsk_prot && prot->rsk_prot->slab) {
2165 kmem_cache_destroy(prot->rsk_prot->slab);
2166 prot->rsk_prot->slab = NULL;
2167 }
2e6599cb 2168out_free_request_sock_slab_name:
7e56b5d6 2169 kfree(prot->rsk_prot->slab_name);
2170out_free_sock_slab:
2171 kmem_cache_destroy(prot->slab);
2172 prot->slab = NULL;
2173out:
2174 return -ENOBUFS;
2175}
2176
2177EXPORT_SYMBOL(proto_register);
2178
2179void proto_unregister(struct proto *prot)
2180{
2181 write_lock(&proto_list_lock);
13ff3d6f 2182 release_proto_idx(prot);
2183 list_del(&prot->node);
2184 write_unlock(&proto_list_lock);
2185
2186 if (prot->slab != NULL) {
2187 kmem_cache_destroy(prot->slab);
2188 prot->slab = NULL;
2189 }
2190
2e6599cb 2191 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2e6599cb 2192 kmem_cache_destroy(prot->rsk_prot->slab);
7e56b5d6 2193 kfree(prot->rsk_prot->slab_name);
2194 prot->rsk_prot->slab = NULL;
2195 }
2196
6d6ee43e 2197 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
6d6ee43e 2198 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
7e56b5d6 2199 kfree(prot->twsk_prot->twsk_slab_name);
6d6ee43e 2200 prot->twsk_prot->twsk_slab = NULL;
8feaf0c0 2201 }
2202}
2203
2204EXPORT_SYMBOL(proto_unregister);
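/*
 * Illustrative sketch, not part of this file: a protocol module registers
 * its struct proto at init time and unregisters it on exit (names below
 * are hypothetical; a real protocol fills in many more fields):
 *
 *	static struct proto example_prot = {
 *		.name     = "EXAMPLE",
 *		.owner    = THIS_MODULE,
 *		.obj_size = sizeof(struct sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_prot, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_prot);
 *	}
 */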
2205
2206#ifdef CONFIG_PROC_FS
1da177e4 2207static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
9a429c49 2208 __acquires(proto_list_lock)
2209{
2210 read_lock(&proto_list_lock);
60f0438a 2211 return seq_list_start_head(&proto_list, *pos);
2212}
2213
2214static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2215{
60f0438a 2216 return seq_list_next(v, &proto_list, pos);
2217}
2218
2219static void proto_seq_stop(struct seq_file *seq, void *v)
9a429c49 2220 __releases(proto_list_lock)
2221{
2222 read_unlock(&proto_list_lock);
2223}
2224
2225static char proto_method_implemented(const void *method)
2226{
2227 return method == NULL ? 'n' : 'y';
2228}
2229
2230static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2231{
2232 seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
2233 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2234 proto->name,
2235 proto->obj_size,
14e943db 2236 sock_prot_inuse_get(seq_file_net(seq), proto),
2237 proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
2238 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2239 proto->max_header,
2240 proto->slab == NULL ? "no" : "yes",
2241 module_name(proto->owner),
2242 proto_method_implemented(proto->close),
2243 proto_method_implemented(proto->connect),
2244 proto_method_implemented(proto->disconnect),
2245 proto_method_implemented(proto->accept),
2246 proto_method_implemented(proto->ioctl),
2247 proto_method_implemented(proto->init),
2248 proto_method_implemented(proto->destroy),
2249 proto_method_implemented(proto->shutdown),
2250 proto_method_implemented(proto->setsockopt),
2251 proto_method_implemented(proto->getsockopt),
2252 proto_method_implemented(proto->sendmsg),
2253 proto_method_implemented(proto->recvmsg),
2254 proto_method_implemented(proto->sendpage),
2255 proto_method_implemented(proto->bind),
2256 proto_method_implemented(proto->backlog_rcv),
2257 proto_method_implemented(proto->hash),
2258 proto_method_implemented(proto->unhash),
2259 proto_method_implemented(proto->get_port),
2260 proto_method_implemented(proto->enter_memory_pressure));
2261}
2262
2263static int proto_seq_show(struct seq_file *seq, void *v)
2264{
60f0438a 2265 if (v == &proto_list)
2266 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2267 "protocol",
2268 "size",
2269 "sockets",
2270 "memory",
2271 "press",
2272 "maxhdr",
2273 "slab",
2274 "module",
2275 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2276 else
60f0438a 2277 proto_seq_printf(seq, list_entry(v, struct proto, node));
2278 return 0;
2279}
2280
f690808e 2281static const struct seq_operations proto_seq_ops = {
2282 .start = proto_seq_start,
2283 .next = proto_seq_next,
2284 .stop = proto_seq_stop,
2285 .show = proto_seq_show,
2286};
2287
2288static int proto_seq_open(struct inode *inode, struct file *file)
2289{
2290 return seq_open_net(inode, file, &proto_seq_ops,
2291 sizeof(struct seq_net_private));
2292}
2293
9a32144e 2294static const struct file_operations proto_seq_fops = {
2295 .owner = THIS_MODULE,
2296 .open = proto_seq_open,
2297 .read = seq_read,
2298 .llseek = seq_lseek,
2299 .release = seq_release_net,
2300};
2301
2302static __net_init int proto_init_net(struct net *net)
2303{
2304 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2305 return -ENOMEM;
2306
2307 return 0;
2308}
2309
2310static __net_exit void proto_exit_net(struct net *net)
2311{
2312 proc_net_remove(net, "protocols");
2313}
2314
2315
2316static __net_initdata struct pernet_operations proto_net_ops = {
2317 .init = proto_init_net,
2318 .exit = proto_exit_net,
2319};
2320
2321static int __init proto_init(void)
2322{
14e943db 2323 return register_pernet_subsys(&proto_net_ops);
2324}
2325
2326subsys_initcall(proto_init);
2327
2328#endif /* PROC_FS */
2329
2330EXPORT_SYMBOL(sk_alloc);
2331EXPORT_SYMBOL(sk_free);
2332EXPORT_SYMBOL(sk_send_sigurg);
2333EXPORT_SYMBOL(sock_alloc_send_skb);
2334EXPORT_SYMBOL(sock_init_data);
2335EXPORT_SYMBOL(sock_kfree_s);
2336EXPORT_SYMBOL(sock_kmalloc);
2337EXPORT_SYMBOL(sock_no_accept);
2338EXPORT_SYMBOL(sock_no_bind);
2339EXPORT_SYMBOL(sock_no_connect);
2340EXPORT_SYMBOL(sock_no_getname);
2341EXPORT_SYMBOL(sock_no_getsockopt);
2342EXPORT_SYMBOL(sock_no_ioctl);
2343EXPORT_SYMBOL(sock_no_listen);
2344EXPORT_SYMBOL(sock_no_mmap);
2345EXPORT_SYMBOL(sock_no_poll);
2346EXPORT_SYMBOL(sock_no_recvmsg);
2347EXPORT_SYMBOL(sock_no_sendmsg);
2348EXPORT_SYMBOL(sock_no_sendpage);
2349EXPORT_SYMBOL(sock_no_setsockopt);
2350EXPORT_SYMBOL(sock_no_shutdown);
2351EXPORT_SYMBOL(sock_no_socketpair);
2352EXPORT_SYMBOL(sock_rfree);
2353EXPORT_SYMBOL(sock_setsockopt);
2354EXPORT_SYMBOL(sock_wfree);
2355EXPORT_SYMBOL(sock_wmalloc);
2356EXPORT_SYMBOL(sock_i_uid);
2357EXPORT_SYMBOL(sock_i_ino);
1da177e4 2358EXPORT_SYMBOL(sysctl_optmem_max);