/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#include <net/af_unix.h>


#ifdef CONFIG_INET
#include <net/tcp.h>
#endif
#include <linux/xlog.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has it in the user
 * namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has it in all user
 * namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has it over the network
 * namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);


#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

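/*
 * Worked example (illustrative, platform-dependent): SKB_TRUESIZE(256)
 * is 256 bytes of payload plus the aligned sizes of struct sk_buff and
 * struct skb_shared_info. On a 64-bit build where that comes to roughly
 * 832 bytes, the defaults below work out to 832 * 256 = 212992 bytes
 * (208 KiB) per socket. Deriving the limits from SKB_TRUESIZE() rather
 * than hardcoding them is what keeps this budget stable across
 * platforms with different sk_buff overheads.
 */
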
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = (SK_RMEM_MAX*8);
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

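/*
 * Usage sketch (illustrative, caller assumed): swap-over-network style
 * users mark a kernel-side socket so its allocations may dip into the
 * emergency reserves, and clear the flag again at teardown:
 *
 *	sk_set_memalloc(sk);
 *	... socket carries memory-reclaim traffic ...
 *	sk_clear_memalloc(sk);
 *
 * The memalloc_socks static key above keeps this feature's cost off
 * the fast path while no such socket exists.
 */
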
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

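/*
 * For context (sketch of the inline wrapper in include/net/sock.h of
 * this kernel series, quoted roughly from memory): only pfmemalloc
 * skbs on memalloc sockets are routed through the PF_MEMALLOC path
 * above; everything else calls the protocol handler directly:
 *
 *	static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sk_memalloc_socks() && skb_pfmemalloc(skb))
 *			return __sk_backlog_rcv(sk, skb);
 *		return sk->sk_backlog_rcv(sk, skb);
 *	}
 */
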
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
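/*
 * Illustrative userspace counterpart (assumed): a zero timeval means
 * "wait forever" (MAX_SCHEDULE_TIMEOUT); anything else is converted to
 * jiffies with the microseconds rounded up:
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * With HZ=100 this stores 2*100 + 500000/10000 = 250 jiffies in
 * sk->sk_rcvtimeo.
 */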

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we dont leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
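/*
 * Illustrative caller (assumed, not from this file; example_proto_rcv
 * is hypothetical): a datagram protocol's rcv handler typically ends by
 * handing the skb to the owning socket and dropping it on failure:
 *
 *	static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 *
 * The rcvbuf check, socket filter and rmem accounting above all run
 * before the skb becomes visible to readers.
 */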

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

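/*
 * Illustrative userspace call (assumed): binding needs CAP_NET_RAW in
 * the socket's network namespace, and an empty name unbinds:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 * Only the ifindex is stored in sk->sk_bound_dev_if, so the binding
 * follows the device, not the name.
 */
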
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
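/*
 * Worked example (illustrative): because SO_RCVBUF is doubled on the
 * way in to cover sk_buff overhead, a process that sets 64 KiB reads
 * back 128 KiB:
 *
 *	int val = 65536;
 *	socklen_t len = sizeof(val);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);  // val == 131072
 *
 * The requested value is clamped to sysctl_rmem_max before doubling;
 * SO_RCVBUFFORCE lets CAP_NET_ADMIN exceed the clamp.
 */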


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

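/*
 * Illustrative userspace counterpart (assumed; struct ucred needs
 * _GNU_SOURCE): reading back the peer credentials filled in by
 * cred_to_ucred() above:
 *
 *	struct ucred cr;
 *	socklen_t len = sizeof(cr);
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &len) == 0)
 *		printf("peer pid=%d uid=%d gid=%d\n", cr.pid, cr.uid, cr.gid);
 *
 * The values are translated into the caller's pid and user namespaces
 * by pid_vnr() and from_kuid_munged()/from_kgid_munged().
 */
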
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

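/*
 * Layout sketch (illustrative): the two *node.next fields are 'nulls'
 * markers that concurrent RCU lookups may still dereference, so the
 * object is zeroed in three spans around them, preserving
 * sizeof(void *) bytes at each marker:
 *
 *	[0 ........................ nulls1)  zeroed
 *	[nulls1, nulls1 + sizeof(void *))    preserved: skc_node.next
 *	[......................... nulls2)  zeroed
 *	[nulls2, nulls2 + sizeof(void *))    preserved: skc_portaddr_node.next
 *	[......................... size)    zeroed
 */
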
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	classid = task_cls_classid(current);
	if (classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
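/*
 * Usage sketch (illustrative; PF_EXAMPLE and example_proto are
 * hypothetical): a protocol's ->create() handler pairs sk_alloc() with
 * sock_init_data(), and errors unwind through sk_free():
 *
 *	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * Note sk_wmem_alloc starts at 1; sk_free() drops that reference and
 * defers __sk_free() until all in-flight packets are gone.
 */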

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop the reference to sk->sk_net. It has
 * already been dropped in sk_change_net. Taking a reference to a
 * stopping namespace is not an option.
 * Take a reference to a socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

87d11ceb 1559
9958089a
AK
1560void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1561{
1562 __sk_dst_set(sk, dst);
1563 sk->sk_route_caps = dst->dev->features;
1564 if (sk->sk_route_caps & NETIF_F_GSO)
4fcd6b99 1565 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
a465419b 1566 sk->sk_route_caps &= ~sk->sk_route_nocaps;
9958089a 1567 if (sk_can_gso(sk)) {
82cc1a7a 1568 if (dst->header_len) {
9958089a 1569 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
82cc1a7a 1570 } else {
9958089a 1571 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
82cc1a7a 1572 sk->sk_gso_max_size = dst->dev->gso_max_size;
1485348d 1573 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
82cc1a7a 1574 }
9958089a
AK
1575 }
1576}
1577EXPORT_SYMBOL_GPL(sk_setup_caps);
1578
/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

#ifdef CONFIG_INET
	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
#endif
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	/* mtk_net: guard against a NULL sk (vendor fix) */
	if (!sk) {
		pr_info("sk == NULL for sock_i_uid\n");
		return GLOBAL_ROOT_UID;
	}

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

1696
4ec93edb 1697/*
1da177e4 1698 * Allocate a memory block from the socket's option memory buffer.
4ec93edb 1699 */
dd0fc66f 1700void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1da177e4 1701{
95c96174 1702 if ((unsigned int)size <= sysctl_optmem_max &&
1da177e4
LT
1703 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1704 void *mem;
1705 /* First do the add, to avoid the race if kmalloc
4ec93edb 1706 * might sleep.
1da177e4
LT
1707 */
1708 atomic_add(size, &sk->sk_omem_alloc);
1709 mem = kmalloc(size, priority);
1710 if (mem)
1711 return mem;
1712 atomic_sub(size, &sk->sk_omem_alloc);
1713 }
1714 return NULL;
1715}
2a91525c 1716EXPORT_SYMBOL(sock_kmalloc);
1da177e4
LT
1717
1718/*
1719 * Free an option memory block.
1720 */
1721void sock_kfree_s(struct sock *sk, void *mem, int size)
1722{
1723 kfree(mem);
1724 atomic_sub(size, &sk->sk_omem_alloc);
1725}
2a91525c 1726EXPORT_SYMBOL(sock_kfree_s);
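/*
 * Usage sketch (illustrative; example_opt is hypothetical): option
 * memory must be released with the same size it was charged with,
 * since the accounting is a plain byte counter:
 *
 *	struct example_opt *opt;
 *
 *	opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 *
 * The atomic_add-before-kmalloc ordering above closes the race where a
 * sleeping allocation could let another thread overshoot optmem_max.
 */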

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/* Debug helper: dump identifying info for an AF_UNIX socket and its peer. */
static int sock_dump_info(struct sock *sk)
{
	if (sk->sk_family == AF_UNIX) {
		struct unix_sock *u = unix_sk(sk);
		struct sock *other = NULL;

		if ((u->path.dentry != NULL) && (u->path.dentry->d_iname != NULL)) {
#ifdef CONFIG_MTK_NET_LOGGING
			printk(KERN_INFO "[mtk_net][sock]sockdbg: socket-Name:%s \n", u->path.dentry->d_iname);
#endif
		} else {
#ifdef CONFIG_MTK_NET_LOGGING
			printk(KERN_INFO "[mtk_net][sock]sockdbg: socket Name (NULL)\n");
#endif
		}

		if (sk->sk_socket && SOCK_INODE(sk->sk_socket)) {
#ifdef CONFIG_MTK_NET_LOGGING
			printk(KERN_INFO "[mtk_net][sock]sockdbg: socket Inode[%lu]\n", SOCK_INODE(sk->sk_socket)->i_ino);
#endif
		}

		other = unix_sk(sk)->peer;
		if (!other) {
#ifdef CONFIG_MTK_NET_LOGGING
			printk(KERN_INFO "[mtk_net][sock]sockdbg: peer is (NULL)\n");
#endif
		} else {
			if ((((struct unix_sock *)other)->path.dentry != NULL) &&
			    (((struct unix_sock *)other)->path.dentry->d_iname != NULL)) {
#ifdef CONFIG_MTK_NET_LOGGING
				printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Name:%s \n", ((struct unix_sock *)other)->path.dentry->d_iname);
#endif
			} else {
#ifdef CONFIG_MTK_NET_LOGGING
				printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Name (NULL)\n");
#endif
			}

			if (other->sk_socket && SOCK_INODE(other->sk_socket)) {
#ifdef CONFIG_MTK_NET_LOGGING
				printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Inode [%lu]\n", SOCK_INODE(other->sk_socket)->i_ino);
#endif
			}
#ifdef CONFIG_MTK_NET_LOGGING
			printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Receive Queue len:%d\n", other->sk_receive_queue.qlen);
#endif
			/*
			 * Optional deeper dump, disabled in this build: peek
			 * the tail skb of the peer's receive queue and
			 * hex-print up to 127 bytes of its data:
			 *
			 *	struct sk_buff *skb;
			 *	char skbmsg[128];
			 *	int i = 0, len = 0;
			 *
			 *	skb = skb_peek_tail(&other->sk_receive_queue);
			 *	if (skb == NULL) {
			 *		printk(KERN_INFO "sockdbg: Peer Receive Queue is null (warning)\n");
			 *	} else if ((skb->len != 0) && (skb->data != NULL)) {
			 *		len = skb->len >= 127 ? 127 : skb->len;
			 *		for (i = 0; i < len; i++)
			 *			sprintf(skbmsg + i, "%x", skb->data[i]);
			 *		skbmsg[len] = '\0';
			 *		printk(KERN_INFO "sockdbg: Peer Receive Queue dump(%d bytes):%s\n", len, skbmsg);
			 *	} else {
			 *		printk(KERN_INFO "sockdbg: Peer Receive skb error\n");
			 *	}
			 */
		}
	}

	return 0;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	err = -EMSGSIZE;
	if (npages > MAX_SKB_FRAGS)
		goto failure;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					__skb_fill_page_desc(skb, i,
							page, 0,
							(data_len >= PAGE_SIZE ?
							 PAGE_SIZE :
							 data_len));
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;

		sock_dump_info(sk);
#ifdef CONFIG_MTK_NET_LOGGING
		printk(KERN_INFO "[mtk_net][sock]sockdbg: wait_for_wmem, timeo=%ld, wmem=%d, sndbuf=%d\n",
		       timeo, atomic_read(&sk->sk_wmem_alloc), sk->sk_sndbuf);
#endif
		timeo = sock_wait_for_wmem(sk, timeo);
#ifdef CONFIG_MTK_NET_LOGGING
		printk(KERN_INFO "[mtk_net][sock]sockdbg: wait_for_wmem done, header_len=0x%lx, data_len=0x%lx, timeo=%ld\n",
		       header_len, data_len, timeo);
#endif
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
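
/* Illustrative sketch (not part of the original file): a typical datagram
 * sendmsg path reserves header room, copies the payload from the iovec and
 * handles the error code sock_alloc_send_skb() filled in. The function name
 * is hypothetical; the calls are the real 3.10-era APIs.
 */
#if 0
static int example_sendmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int noblock)
{
	int err;
	struct sk_buff *skb = sock_alloc_send_skb(sk, len + MAX_HEADER,
						  noblock, &err);

	if (!skb)
		return err;	/* -EAGAIN, -EPIPE, -EINTR, ... */
	skb_reserve(skb, MAX_HEADER);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}
	/* ... hand the skb to the transmit path ... */
	kfree_skb(skb);
	return len;
}
#endif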

/* On 32bit arches, an skb frag is limited to 2^15 bytes */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	int order;

	if (pfrag->page) {
		if (atomic_read(&pfrag->page->_count) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset < pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	/* We restrict high order allocations to users that can afford to wait */
	order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;

	do {
		gfp_t gfp = sk->sk_allocation;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
		pfrag->page = alloc_pages(gfp, order);
		if (likely(pfrag->page)) {
			pfrag->offset = 0;
			pfrag->size = PAGE_SIZE << order;
			return true;
		}
	} while (--order >= 0);

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
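
/* Illustrative sketch (not part of the original file): sk_page_frag_refill()
 * is the allocator behind sk->sk_frag (set up in sock_init_data() below);
 * a sender carves chunks out of the current frag page and advances the
 * offset. The helper below is hypothetical.
 */
#if 0
static bool example_reserve_frag(struct sock *sk, int bytes,
				 struct page **page, int *off)
{
	struct page_frag *pfrag = &sk->sk_frag;

	if (!sk_page_frag_refill(sk, pfrag))
		return false;		/* memory pressure was signalled */
	if (pfrag->size - pfrag->offset < bytes)
		return false;		/* caller should retry or split */
	*page = pfrag->page;
	*off = pfrag->offset;
	pfrag->offset += bytes;		/* consume part of the page */
	return true;
}
#endif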

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
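
/* Illustrative sketch (not part of the original file): a protocol's recvmsg
 * path typically loops on sk_wait_data() under lock_sock() until data shows
 * up, the timeout expires or a signal arrives. The surrounding function is
 * hypothetical; the calls are the real APIs.
 */
#if 0
static int example_wait_for_data(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	int err = 0;

	lock_sock(sk);
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
		/* sk_wait_event() drops and retakes the socket lock while
		 * sleeping, so the backlog gets processed.
		 */
		sk_wait_data(sk, &timeo);
	}
	release_sock(sk);
	return err;
}
#endif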

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
	    allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
	    allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
	    (allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
			return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);

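/* Illustrative sketch (not part of the original file): how a protocol might
 * charge receive memory before queueing an skb. __sk_mem_schedule() is the
 * function above; sk_mem_charge() is assumed available from
 * include/net/sock.h. The wrapper function is hypothetical.
 */
#if 0
static bool example_charge_rmem(struct sock *sk, struct sk_buff *skb)
{
	/* Raise sk_forward_alloc (in whole SK_MEM_QUANTUM units) if the
	 * protocol's memory limits allow it...
	 */
	if (!__sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV))
		return false;
	/* ...then consume part of that reservation for this skb. The
	 * remainder is eventually given back via __sk_mem_reclaim().
	 */
	sk_mem_charge(sk, skb->truesize);
	return true;
}
#endif
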
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
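
/* Illustrative sketch (not part of the original file): a datagram protocol's
 * proto_ops can point unsupported operations at the sock_no_* stubs above.
 * The example_dgram_ops structure and the example_bind/example_sendmsg
 * handlers it references are hypothetical; the stubs are real.
 */
#if 0
static const struct proto_ops example_dgram_ops = {
	.family		= PF_PACKET,		/* any address family */
	.owner		= THIS_MODULE,
	.bind		= example_bind,		/* protocol-specific */
	.sendmsg	= example_sendmsg,	/* protocol-specific */
	.connect	= sock_no_connect,	/* unsupported -> -EOPNOTSUPP */
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,	/* falls back to kernel_sendmsg() */
};
#endif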

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
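
/* Illustrative sketch (not part of the original file): sk_reset_timer()
 * takes a reference on the socket when it arms a previously inactive timer,
 * so the timer callback must drop that reference with sock_put() when it
 * fires. The handler name below is hypothetical.
 */
#if 0
static void example_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... protocol timer work ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* pairs with sock_hold() in sk_reset_timer() */
}
#endif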

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	sk->sk_pacing_rate = ~0U;
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	/* Warning : release_cb() might need to release sk ownership,
	 * ie call sock_release_ownership(sk) before us.
	 */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block. It returns false if the fast path was taken:
 * sk_lock.slock locked, owned = 0, BH disabled.
 * It returns true if the slow path was taken:
 * sk_lock.slock unlocked, owned = 1, BH enabled.
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note : We must disable BH
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
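
/* Illustrative sketch (not part of the original file): lock_sock_fast() must
 * be paired with unlock_sock_fast() (from include/net/sock.h), passing back
 * the value it returned so the matching unlock path is taken. The function
 * below is hypothetical.
 */
#if 0
static void example_touch_sock(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... short section that never blocks ... */
	unlock_sock_fast(sk, slow);
}
#endif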

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
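
/* Illustrative sketch (not part of the original file): minimal registration
 * of a protocol with its own slab cache, as a module init/exit pair. The
 * example_proto instance and its name are hypothetical;
 * proto_register()/proto_unregister() are the functions above.
 */
#if 0
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init example_init(void)
{
	return proto_register(&example_proto, 1);	/* 1: allocate a slab */
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}
#endif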

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */