net/unix/af_unix.c
1 /*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko Eißfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid a huge amount
38 * of hashed socks (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lots of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
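/*
 * Illustrative userspace sketch of the two binding styles described
 * above (editor's example, not part of this file; a socket can be
 * bound only once, the two binds are shown together purely for
 * comparison, and the name "example" is arbitrary):
 *
 *	struct sockaddr_un sa;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sun_family = AF_UNIX;
 *
 *	// Filesystem binding: NUL-terminated path, creates an inode.
 *	strcpy(sa.sun_path, "/tmp/example.sock");
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 *	// Abstract binding: sun_path[0] == 0 and the name is the byte
 *	// sequence that follows; pass the exact length, no inode is made.
 *	sa.sun_path[0] = 0;
 *	memcpy(sa.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&sa,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */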
82
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/signal.h>
86 #include <linux/sched.h>
87 #include <linux/errno.h>
88 #include <linux/string.h>
89 #include <linux/stat.h>
90 #include <linux/dcache.h>
91 #include <linux/namei.h>
92 #include <linux/socket.h>
93 #include <linux/un.h>
94 #include <linux/fcntl.h>
95 #include <linux/termios.h>
96 #include <linux/sockios.h>
97 #include <linux/net.h>
98 #include <linux/in.h>
99 #include <linux/fs.h>
100 #include <linux/slab.h>
101 #include <asm/uaccess.h>
102 #include <linux/skbuff.h>
103 #include <linux/netdevice.h>
104 #include <net/net_namespace.h>
105 #include <net/sock.h>
106 #include <net/tcp_states.h>
107 #include <net/af_unix.h>
108 #include <linux/proc_fs.h>
109 #include <linux/seq_file.h>
110 #include <net/scm.h>
111 #include <linux/init.h>
112 #include <linux/poll.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/mount.h>
115 #include <net/checksum.h>
116 #include <linux/security.h>
117 #include <linux/freezer.h>
118
119
120 #include <linux/uio.h>
121 #include <linux/blkdev.h>
122 #include <linux/compat.h>
123 #include <linux/rtc.h>
124 #include <asm/kmap_types.h>
125 #include <linux/device.h>
126
127
128 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
129 EXPORT_SYMBOL_GPL(unix_socket_table);
130 DEFINE_SPINLOCK(unix_table_lock);
131 EXPORT_SYMBOL_GPL(unix_table_lock);
132 static atomic_long_t unix_nr_socks;
133
134
135 static struct hlist_head *unix_sockets_unbound(void *addr)
136 {
137 unsigned long hash = (unsigned long)addr;
138
139 hash ^= hash >> 16;
140 hash ^= hash >> 8;
141 hash %= UNIX_HASH_SIZE;
142 return &unix_socket_table[UNIX_HASH_SIZE + hash];
143 }
144
145 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
146
147
148 /* for aee interface start */
149 #define __UNIX_SOCKET_OUTPUT_BUF_SIZE__ 3500
150 static struct proc_dir_entry *gunix_socket_track_aee_entry = NULL;
151 #define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
152 #define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
153
154 static volatile unsigned int unix_sock_track_stop_flag = 0;
155 #define unix_peer(sk) (unix_sk(sk)->peer)
156
157
158 #ifdef CONFIG_SECURITY_NETWORK
159 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
160 {
161 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
162 }
163
164 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
165 {
166 scm->secid = *UNIXSID(skb);
167 }
168 #else
169 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
170 { }
171
172 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
173 { }
174 #endif /* CONFIG_SECURITY_NETWORK */
175
176 /*
177 * SMP locking strategy:
178 *    the hash table is protected by the spinlock unix_table_lock;
179 *    each socket's state is protected by a separate spinlock.
180 */
181
182 static inline unsigned int unix_hash_fold(__wsum n)
183 {
184 unsigned int hash = (__force unsigned int)csum_fold(n);
185
186 hash ^= hash>>8;
187 return hash&(UNIX_HASH_SIZE-1);
188 }
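/*
 * Worked example (editor's sketch): csum_fold() collapses the 32-bit
 * partial checksum to 16 bits; the extra "hash ^= hash >> 8" then mixes
 * the high byte into the low one before masking. With UNIX_HASH_SIZE ==
 * 256 the result is a slot index in [0, 255]; e.g. a folded sum of
 * 0xa1b2 gives (0xa1b2 ^ 0xa1) & 0xff == 0x13.
 */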
189
190
191
192 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
193 {
194 return unix_peer(osk) == sk;
195 }
196
197 static inline int unix_may_send(struct sock *sk, struct sock *osk)
198 {
199 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
200 }
201
202 static inline int unix_recvq_full(struct sock const *sk)
203 {
204 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
205 }
206
207 struct sock *unix_peer_get(struct sock *s)
208 {
209 struct sock *peer;
210
211 unix_state_lock(s);
212 peer = unix_peer(s);
213 if (peer)
214 sock_hold(peer);
215 unix_state_unlock(s);
216 return peer;
217 }
218 EXPORT_SYMBOL_GPL(unix_peer_get);
219
220 static inline void unix_release_addr(struct unix_address *addr)
221 {
222 if (atomic_dec_and_test(&addr->refcnt))
223 kfree(addr);
224 }
225
226 /*
227 * Check unix socket name:
228 * - it should not be zero length.
229 * - if it does not start with a zero byte, it must be NUL terminated (FS object)
230 * - if it starts with a zero byte, it is an abstract name.
231 */
232
233 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
234 {
235 if (len <= sizeof(short) || len > sizeof(*sunaddr))
236 return -EINVAL;
237 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
238 return -EINVAL;
239 if (sunaddr->sun_path[0]) {
240 /*
241 * This may look like an off by one error but it is a bit more
242 * subtle. 108 is the longest valid AF_UNIX path for a binding.
243 * sun_path[108] doesn't as such exist. However in kernel space
244 * we are guaranteed that it is a valid memory location in our
245 * kernel address buffer.
246 */
247 ((char *)sunaddr)[len] = 0;
248 len = strlen(sunaddr->sun_path)+1+sizeof(short);
249 return len;
250 }
251
252 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
253 return len;
254 }
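/*
 * Worked example (editor's note): for the path name "/tmp/x",
 * unix_mkname() returns strlen("/tmp/x") + 1 + sizeof(short) =
 * 6 + 1 + 2 = 9 and leaves *hashp untouched; for an abstract name it
 * returns the caller's len unchanged and fills *hashp from a checksum
 * over the whole address.
 */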
255
256 static void __unix_remove_socket(struct sock *sk)
257 {
258 sk_del_node_init(sk);
259 }
260
261 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
262 {
263 WARN_ON(!sk_unhashed(sk));
264 sk_add_node(sk, list);
265 }
266
267 static inline void unix_remove_socket(struct sock *sk)
268 {
269 spin_lock(&unix_table_lock);
270 __unix_remove_socket(sk);
271 spin_unlock(&unix_table_lock);
272 }
273
274 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
275 {
276 spin_lock(&unix_table_lock);
277 __unix_insert_socket(list, sk);
278 spin_unlock(&unix_table_lock);
279 }
280
281 static struct sock *__unix_find_socket_byname(struct net *net,
282 struct sockaddr_un *sunname,
283 int len, int type, unsigned int hash)
284 {
285 struct sock *s;
286
287 sk_for_each(s, &unix_socket_table[hash ^ type]) {
288 struct unix_sock *u = unix_sk(s);
289
290 if (!net_eq(sock_net(s), net))
291 continue;
292
293 if (u->addr->len == len &&
294 !memcmp(u->addr->name, sunname, len))
295 goto found;
296 }
297 s = NULL;
298 found:
299 return s;
300 }
301
302 static inline struct sock *unix_find_socket_byname(struct net *net,
303 struct sockaddr_un *sunname,
304 int len, int type,
305 unsigned int hash)
306 {
307 struct sock *s;
308
309 spin_lock(&unix_table_lock);
310 s = __unix_find_socket_byname(net, sunname, len, type, hash);
311 if (s)
312 sock_hold(s);
313 spin_unlock(&unix_table_lock);
314 return s;
315 }
316
317 static struct sock *unix_find_socket_byinode(struct inode *i)
318 {
319 struct sock *s;
320
321 spin_lock(&unix_table_lock);
322 sk_for_each(s,
323 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
324 struct dentry *dentry = unix_sk(s)->path.dentry;
325
326 if (dentry && dentry->d_inode == i) {
327 sock_hold(s);
328 goto found;
329 }
330 }
331 s = NULL;
332 found:
333 spin_unlock(&unix_table_lock);
334 return s;
335 }
336
337 /* Support code for asymmetrically connected dgram sockets
338 *
339 * If a datagram socket is connected to a socket not itself connected
340 * to the first socket (eg, /dev/log), clients may only enqueue more
341 * messages if the present receive queue of the server socket is not
342 * "too large". This means there's a second writeability condition
343 * poll and sendmsg need to test. The dgram recv code will do a wake
344 * up on the peer_wait wait queue of a socket upon reception of a
345 * datagram which needs to be propagated to sleeping would-be writers
346 * since these might not have sent anything so far. This can't be
347 * accomplished via poll_wait because the lifetime of the server
348 * socket might be less than that of its clients if these break their
349 * association with it or if the server socket is closed while clients
350 * are still connected to it and there's no way to inform "a polling
351 * implementation" that it should let go of a certain wait queue
352 *
353 * In order to propagate a wake up, a wait_queue_t of the client
354 * socket is enqueued on the peer_wait queue of the server socket
355 * whose wake function does a wake_up on the ordinary client socket
356 * wait queue. This connection is established whenever a write (or
357 * poll for write) hits the flow control condition, and is broken when the
358 * association to the server socket is dissolved or after a wake up
359 * was relayed.
360 */
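/*
 * Editor's sketch of the relay described above, assuming client C is
 * dgram-connected to server S but not vice versa:
 *
 *   C hits the flow-control limit ->  C's peer_wake entry is hooked
 *                                     onto S->peer_wait
 *                                     (unix_dgram_peer_wake_connect)
 *   S dequeues a datagram         ->  the wake_up on S->peer_wait runs
 *                                     unix_dgram_peer_wake_relay(),
 *                                     which unhooks the entry and wakes
 *                                     C's own sk_sleep() queue.
 */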
361
362 static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
363 void *key)
364 {
365 struct unix_sock *u;
366 wait_queue_head_t *u_sleep;
367
368 u = container_of(q, struct unix_sock, peer_wake);
369
370 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
371 q);
372 u->peer_wake.private = NULL;
373
374 /* relaying can only happen while the wq still exists */
375 u_sleep = sk_sleep(&u->sk);
376 if (u_sleep)
377 wake_up_interruptible_poll(u_sleep, key);
378
379 return 0;
380 }
381
382 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
383 {
384 struct unix_sock *u, *u_other;
385 int rc;
386
387 u = unix_sk(sk);
388 u_other = unix_sk(other);
389 rc = 0;
390 spin_lock(&u_other->peer_wait.lock);
391
392 if (!u->peer_wake.private) {
393 u->peer_wake.private = other;
394 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
395
396 rc = 1;
397 }
398
399 spin_unlock(&u_other->peer_wait.lock);
400 return rc;
401 }
402
403 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
404 struct sock *other)
405 {
406 struct unix_sock *u, *u_other;
407
408 u = unix_sk(sk);
409 u_other = unix_sk(other);
410 spin_lock(&u_other->peer_wait.lock);
411
412 if (u->peer_wake.private == other) {
413 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
414 u->peer_wake.private = NULL;
415 }
416
417 spin_unlock(&u_other->peer_wait.lock);
418 }
419
420 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
421 struct sock *other)
422 {
423 unix_dgram_peer_wake_disconnect(sk, other);
424 wake_up_interruptible_poll(sk_sleep(sk),
425 POLLOUT |
426 POLLWRNORM |
427 POLLWRBAND);
428 }
429
430 /* preconditions:
431 * - unix_peer(sk) == other
432 * - association is stable
433 */
434 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
435 {
436 int connected;
437
438 connected = unix_dgram_peer_wake_connect(sk, other);
439
440 if (unix_recvq_full(other))
441 return 1;
442
443 if (connected)
444 unix_dgram_peer_wake_disconnect(sk, other);
445
446 return 0;
447 }
448
449 static inline int unix_writable(struct sock *sk)
450 {
451 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
452 }
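/*
 * Worked example (editor's note): the test above keeps the socket
 * writable while wmem_alloc * 4 <= sk_sndbuf. Assuming the common
 * default sk_sndbuf of 212992 bytes, a sender may have up to 53248
 * bytes of skb memory outstanding before poll() stops reporting
 * POLLOUT.
 */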
453
454 static void unix_write_space(struct sock *sk)
455 {
456 struct socket_wq *wq;
457
458 rcu_read_lock();
459 if (unix_writable(sk)) {
460 wq = rcu_dereference(sk->sk_wq);
461 if (wq_has_sleeper(wq))
462 wake_up_interruptible_sync_poll(&wq->wait,
463 POLLOUT | POLLWRNORM | POLLWRBAND);
464 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
465 }
466 rcu_read_unlock();
467 }
468
469 /* When a dgram socket disconnects (or changes its peer), we clear its
470 * receive queue of packets that arrived from the previous peer. First,
471 * this allows flow control based only on wmem_alloc; second, an sk
472 * connected to a peer may receive messages only from that peer. */
473 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
474 {
475 if (!skb_queue_empty(&sk->sk_receive_queue)) {
476 skb_queue_purge(&sk->sk_receive_queue);
477 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
478
479 /* If one link of a bidirectional dgram pipe is disconnected,
480 * we signal an error. Messages are lost. Do not do this
481 * when the peer was not connected to us.
482 */
483 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
484 other->sk_err = ECONNRESET;
485 other->sk_error_report(other);
486 }
487 }
488 }
489
490 static void unix_sock_destructor(struct sock *sk)
491 {
492 struct unix_sock *u = unix_sk(sk);
493
494 skb_queue_purge(&sk->sk_receive_queue);
495
496 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
497 WARN_ON(!sk_unhashed(sk));
498 WARN_ON(sk->sk_socket);
499 if (!sock_flag(sk, SOCK_DEAD)) {
500 #ifdef CONFIG_MTK_NET_LOGGING
501 printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
502 #endif
503 return;
504 }
505
506 if (u->addr)
507 unix_release_addr(u->addr);
508
509 atomic_long_dec(&unix_nr_socks);
510 local_bh_disable();
511 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
512 local_bh_enable();
513 #ifdef UNIX_REFCNT_DEBUG
514 printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
515 atomic_long_read(&unix_nr_socks));
516 #endif
517 }
518
519 static void unix_release_sock(struct sock *sk, int embrion)
520 {
521 struct unix_sock *u = unix_sk(sk);
522 struct path path;
523 struct sock *skpair;
524 struct sk_buff *skb;
525 int state;
526
527 unix_remove_socket(sk);
528
529 /* Clear state */
530 unix_state_lock(sk);
531 sock_orphan(sk);
532 sk->sk_shutdown = SHUTDOWN_MASK;
533 path = u->path;
534 u->path.dentry = NULL;
535 u->path.mnt = NULL;
536 state = sk->sk_state;
537 sk->sk_state = TCP_CLOSE;
538 unix_state_unlock(sk);
539
540 wake_up_interruptible_all(&u->peer_wait);
541
542 skpair = unix_peer(sk);
543
544 if (skpair != NULL) {
545 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
546 unix_state_lock(skpair);
547 /* No more writes */
548 skpair->sk_shutdown = SHUTDOWN_MASK;
549 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
550 skpair->sk_err = ECONNRESET;
551 unix_state_unlock(skpair);
552 skpair->sk_state_change(skpair);
553 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
554 }
555
556 unix_dgram_peer_wake_disconnect(sk, skpair);
557 sock_put(skpair); /* It may now die */
558 unix_peer(sk) = NULL;
559 }
560
561 /* Try to flush out this socket. Throw out buffers at least */
562
563 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
564 if (state == TCP_LISTEN)
565 unix_release_sock(skb->sk, 1);
566 /* passed fds are erased in the kfree_skb hook */
567 kfree_skb(skb);
568 }
569
570 if (path.dentry)
571 path_put(&path);
572
573 sock_put(sk);
574
575 /* ---- Socket is dead now and most probably destroyed ---- */
576
577 /*
578 * Fixme: BSD difference: In BSD all sockets connected to us get
579 * ECONNRESET and we die on the spot. In Linux we behave
580 * like files and pipes do and wait for the last
581 * dereference.
582 *
583 * Can't we simply set sock->err?
584 *
585 * What the above comment does talk about? --ANK(980817)
586 */
587
588 if (unix_tot_inflight)
589 unix_gc(); /* Garbage collect fds */
590 }
591
592 static void init_peercred(struct sock *sk)
593 {
594 put_pid(sk->sk_peer_pid);
595 if (sk->sk_peer_cred)
596 put_cred(sk->sk_peer_cred);
597 sk->sk_peer_pid = get_pid(task_tgid(current));
598 sk->sk_peer_cred = get_current_cred();
599 }
600
601 static void copy_peercred(struct sock *sk, struct sock *peersk)
602 {
603 put_pid(sk->sk_peer_pid);
604 if (sk->sk_peer_cred)
605 put_cred(sk->sk_peer_cred);
606 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
607 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
608 }
609
610 static int unix_listen(struct socket *sock, int backlog)
611 {
612 int err;
613 struct sock *sk = sock->sk;
614 struct unix_sock *u = unix_sk(sk);
615 struct pid *old_pid = NULL;
616
617 err = -EOPNOTSUPP;
618 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
619 goto out; /* Only stream/seqpacket sockets accept */
620 err = -EINVAL;
621 if (!u->addr)
622 goto out; /* No listens on an unbound socket */
623 unix_state_lock(sk);
624 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
625 goto out_unlock;
626 if (backlog > sk->sk_max_ack_backlog)
627 wake_up_interruptible_all(&u->peer_wait);
628 sk->sk_max_ack_backlog = backlog;
629 sk->sk_state = TCP_LISTEN;
630 /* set credentials so connect can copy them */
631 init_peercred(sk);
632 err = 0;
633
634 out_unlock:
635 unix_state_unlock(sk);
636 put_pid(old_pid);
637 out:
638
639 return err;
640 }
641
642 static int unix_release(struct socket *);
643 static int unix_bind(struct socket *, struct sockaddr *, int);
644 static int unix_stream_connect(struct socket *, struct sockaddr *,
645 int addr_len, int flags);
646 static int unix_socketpair(struct socket *, struct socket *);
647 static int unix_accept(struct socket *, struct socket *, int);
648 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
649 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
650 static unsigned int unix_dgram_poll(struct file *, struct socket *,
651 poll_table *);
652 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
653 static int unix_shutdown(struct socket *, int);
654 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
655 struct msghdr *, size_t);
656 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
657 struct msghdr *, size_t, int);
658 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
659 struct msghdr *, size_t);
660 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
661 struct msghdr *, size_t, int);
662 static int unix_dgram_connect(struct socket *, struct sockaddr *,
663 int, int);
664 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
665 struct msghdr *, size_t);
666 static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
667 struct msghdr *, size_t, int);
668
669 static int unix_set_peek_off(struct sock *sk, int val)
670 {
671 struct unix_sock *u = unix_sk(sk);
672
673 if (mutex_lock_interruptible(&u->readlock))
674 return -EINTR;
675
676 sk->sk_peek_off = val;
677 mutex_unlock(&u->readlock);
678
679 return 0;
680 }
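/*
 * Userspace view of the peek offset (editor's sketch, error handling
 * elided):
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);   // peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);   // peeks bytes 16..31: the offset
 *	                               // advanced instead of re-reading
 *
 * A plain recv() without MSG_PEEK consumes data and moves the offset
 * back accordingly (sk_peek_offset_bwd() in the recv paths below).
 */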
681
682
683 static const struct proto_ops unix_stream_ops = {
684 .family = PF_UNIX,
685 .owner = THIS_MODULE,
686 .release = unix_release,
687 .bind = unix_bind,
688 .connect = unix_stream_connect,
689 .socketpair = unix_socketpair,
690 .accept = unix_accept,
691 .getname = unix_getname,
692 .poll = unix_poll,
693 .ioctl = unix_ioctl,
694 .listen = unix_listen,
695 .shutdown = unix_shutdown,
696 .setsockopt = sock_no_setsockopt,
697 .getsockopt = sock_no_getsockopt,
698 .sendmsg = unix_stream_sendmsg,
699 .recvmsg = unix_stream_recvmsg,
700 .mmap = sock_no_mmap,
701 .sendpage = sock_no_sendpage,
702 .set_peek_off = unix_set_peek_off,
703 };
704
705 static const struct proto_ops unix_dgram_ops = {
706 .family = PF_UNIX,
707 .owner = THIS_MODULE,
708 .release = unix_release,
709 .bind = unix_bind,
710 .connect = unix_dgram_connect,
711 .socketpair = unix_socketpair,
712 .accept = sock_no_accept,
713 .getname = unix_getname,
714 .poll = unix_dgram_poll,
715 .ioctl = unix_ioctl,
716 .listen = sock_no_listen,
717 .shutdown = unix_shutdown,
718 .setsockopt = sock_no_setsockopt,
719 .getsockopt = sock_no_getsockopt,
720 .sendmsg = unix_dgram_sendmsg,
721 .recvmsg = unix_dgram_recvmsg,
722 .mmap = sock_no_mmap,
723 .sendpage = sock_no_sendpage,
724 .set_peek_off = unix_set_peek_off,
725 };
726
727 static const struct proto_ops unix_seqpacket_ops = {
728 .family = PF_UNIX,
729 .owner = THIS_MODULE,
730 .release = unix_release,
731 .bind = unix_bind,
732 .connect = unix_stream_connect,
733 .socketpair = unix_socketpair,
734 .accept = unix_accept,
735 .getname = unix_getname,
736 .poll = unix_dgram_poll,
737 .ioctl = unix_ioctl,
738 .listen = unix_listen,
739 .shutdown = unix_shutdown,
740 .setsockopt = sock_no_setsockopt,
741 .getsockopt = sock_no_getsockopt,
742 .sendmsg = unix_seqpacket_sendmsg,
743 .recvmsg = unix_seqpacket_recvmsg,
744 .mmap = sock_no_mmap,
745 .sendpage = sock_no_sendpage,
746 .set_peek_off = unix_set_peek_off,
747 };
748
749 static struct proto unix_proto = {
750 .name = "UNIX",
751 .owner = THIS_MODULE,
752 .obj_size = sizeof(struct unix_sock),
753 };
754
755 /*
756 * AF_UNIX sockets do not interact with hardware, hence they
757 * dont trigger interrupts - so it's safe for them to have
758 * bh-unsafe locking for their sk_receive_queue.lock. Split off
759 * this special lock-class by reinitializing the spinlock key:
760 */
761 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
762
763 static struct sock *unix_create1(struct net *net, struct socket *sock)
764 {
765 struct sock *sk = NULL;
766 struct unix_sock *u;
767
768 atomic_long_inc(&unix_nr_socks);
769 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
770 goto out;
771
772 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
773 if (!sk)
774 goto out;
775
776 sock_init_data(sock, sk);
777 lockdep_set_class(&sk->sk_receive_queue.lock,
778 &af_unix_sk_receive_queue_lock_key);
779
780 sk->sk_write_space = unix_write_space;
781 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
782 sk->sk_destruct = unix_sock_destructor;
783 u = unix_sk(sk);
784 u->path.dentry = NULL;
785 u->path.mnt = NULL;
786 spin_lock_init(&u->lock);
787 atomic_long_set(&u->inflight, 0);
788 INIT_LIST_HEAD(&u->link);
789 mutex_init(&u->readlock); /* single task reading lock */
790 init_waitqueue_head(&u->peer_wait);
791 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
792 unix_insert_socket(unix_sockets_unbound(sk), sk);
793 out:
794 if (sk == NULL)
795 atomic_long_dec(&unix_nr_socks);
796 else {
797 local_bh_disable();
798 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
799 local_bh_enable();
800 }
801 return sk;
802 }
803
804 static int unix_create(struct net *net, struct socket *sock, int protocol,
805 int kern)
806 {
807 if (protocol && protocol != PF_UNIX)
808 return -EPROTONOSUPPORT;
809
810 sock->state = SS_UNCONNECTED;
811
812 switch (sock->type) {
813 case SOCK_STREAM:
814 sock->ops = &unix_stream_ops;
815 break;
816 /*
817 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
818 * nothing uses it.
819 */
820 case SOCK_RAW:
821 sock->type = SOCK_DGRAM;
822 case SOCK_DGRAM:
823 sock->ops = &unix_dgram_ops;
824 break;
825 case SOCK_SEQPACKET:
826 sock->ops = &unix_seqpacket_ops;
827 break;
828 default:
829 return -ESOCKTNOSUPPORT;
830 }
831
832 return unix_create1(net, sock) ? 0 : -ENOMEM;
833 }
834
835 static int unix_release(struct socket *sock)
836 {
837 struct sock *sk = sock->sk;
838
839 if (!sk)
840 return 0;
841
842 unix_release_sock(sk, 0);
843 sock->sk = NULL;
844
845 return 0;
846 }
847
848 static int unix_autobind(struct socket *sock)
849 {
850 struct sock *sk = sock->sk;
851 struct net *net = sock_net(sk);
852 struct unix_sock *u = unix_sk(sk);
853 static u32 ordernum = 1;
854 struct unix_address *addr;
855 int err;
856 unsigned int retries = 0;
857
858 err = mutex_lock_interruptible(&u->readlock);
859 if (err)
860 return err;
861
862 err = 0;
863 if (u->addr)
864 goto out;
865
866 err = -ENOMEM;
867 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
868 if (!addr)
869 goto out;
870
871 addr->name->sun_family = AF_UNIX;
872 atomic_set(&addr->refcnt, 1);
873
874 retry:
875 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
876 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
877
878 spin_lock(&unix_table_lock);
879 ordernum = (ordernum+1)&0xFFFFF;
880
881 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
882 addr->hash)) {
883 spin_unlock(&unix_table_lock);
884 /*
885 * __unix_find_socket_byname() may take a long time if many
886 * names are already in use.
887 */
888 cond_resched();
889 /* Give up if all names seem to be in use. */
890 if (retries++ == 0xFFFFF) {
891 err = -ENOSPC;
892 kfree(addr);
893 goto out;
894 }
895 goto retry;
896 }
897 addr->hash ^= sk->sk_type;
898
899 __unix_remove_socket(sk);
900 u->addr = addr;
901 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
902 spin_unlock(&unix_table_lock);
903 err = 0;
904
905 out: mutex_unlock(&u->readlock);
906 return err;
907 }
908
909 static struct sock *unix_find_other(struct net *net,
910 struct sockaddr_un *sunname, int len,
911 int type, unsigned int hash, int *error)
912 {
913 struct sock *u;
914 struct path path;
915 int err = 0;
916
917 if (sunname->sun_path[0]) {
918 struct inode *inode;
919 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
920 if (err)
921 goto fail;
922 inode = path.dentry->d_inode;
923 err = inode_permission(inode, MAY_WRITE);
924 if (err)
925 goto put_fail;
926
927 err = -ECONNREFUSED;
928 if (!S_ISSOCK(inode->i_mode))
929 goto put_fail;
930 u = unix_find_socket_byinode(inode);
931 if (!u)
932 goto put_fail;
933
934 if (u->sk_type == type)
935 touch_atime(&path);
936
937 path_put(&path);
938
939 err = -EPROTOTYPE;
940 if (u->sk_type != type) {
941 sock_put(u);
942 goto fail;
943 }
944 } else {
945 err = -ECONNREFUSED;
946 u = unix_find_socket_byname(net, sunname, len, type, hash);
947 if (u) {
948 struct dentry *dentry;
949 dentry = unix_sk(u)->path.dentry;
950 if (dentry)
951 touch_atime(&unix_sk(u)->path);
952 } else
953 goto fail;
954 }
955 return u;
956
957 put_fail:
958 path_put(&path);
959 fail:
960 *error = err;
961 return NULL;
962 }
963
964 static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
965 {
966 struct dentry *dentry;
967 struct path path;
968 int err = 0;
969 /*
970 * Get the parent directory, calculate the hash for last
971 * component.
972 */
973 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
974 err = PTR_ERR(dentry);
975 if (IS_ERR(dentry))
976 return err;
977
978 /*
979 * All right, let's create it.
980 */
981 err = security_path_mknod(&path, dentry, mode, 0);
982 if (!err) {
983 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
984 if (!err) {
985 res->mnt = mntget(path.mnt);
986 res->dentry = dget(dentry);
987 }
988 }
989 done_path_create(&path, dentry);
990 return err;
991 }
992
993 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
994 {
995 struct sock *sk = sock->sk;
996 struct net *net = sock_net(sk);
997 struct unix_sock *u = unix_sk(sk);
998 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
999 char *sun_path = sunaddr->sun_path;
1000 int err;
1001 unsigned int hash;
1002 struct unix_address *addr;
1003 struct hlist_head *list;
1004 struct path path = { NULL, NULL };
1005
1006 err = -EINVAL;
1007 if (sunaddr->sun_family != AF_UNIX)
1008 goto out;
1009
1010 if (addr_len == sizeof(short)) {
1011 err = unix_autobind(sock);
1012 goto out;
1013 }
1014
1015 err = unix_mkname(sunaddr, addr_len, &hash);
1016 if (err < 0)
1017 goto out;
1018 addr_len = err;
1019
1020 if (sun_path[0]) {
1021 umode_t mode = S_IFSOCK |
1022 (SOCK_INODE(sock)->i_mode & ~current_umask());
1023 err = unix_mknod(sun_path, mode, &path);
1024 if (err) {
1025 if (err == -EEXIST)
1026 err = -EADDRINUSE;
1027 goto out;
1028 }
1029 }
1030
1031 err = mutex_lock_interruptible(&u->readlock);
1032 if (err)
1033 goto out_put;
1034
1035 err = -EINVAL;
1036 if (u->addr)
1037 goto out_up;
1038
1039 err = -ENOMEM;
1040 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1041 if (!addr)
1042 goto out_up;
1043
1044 memcpy(addr->name, sunaddr, addr_len);
1045 addr->len = addr_len;
1046 addr->hash = hash ^ sk->sk_type;
1047 atomic_set(&addr->refcnt, 1);
1048
1049 if (sun_path[0]) {
1050 addr->hash = UNIX_HASH_SIZE;
1051 hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
1052 spin_lock(&unix_table_lock);
1053 u->path = path;
1054 list = &unix_socket_table[hash];
1055 } else {
1056 spin_lock(&unix_table_lock);
1057 err = -EADDRINUSE;
1058 if (__unix_find_socket_byname(net, sunaddr, addr_len,
1059 sk->sk_type, hash)) {
1060 unix_release_addr(addr);
1061 goto out_unlock;
1062 }
1063
1064 list = &unix_socket_table[addr->hash];
1065 }
1066
1067 err = 0;
1068 __unix_remove_socket(sk);
1069 u->addr = addr;
1070 __unix_insert_socket(list, sk);
1071
1072 out_unlock:
1073 spin_unlock(&unix_table_lock);
1074 out_up:
1075 mutex_unlock(&u->readlock);
1076 out_put:
1077 if (err)
1078 path_put(&path);
1079 out:
1080
1081 return err;
1082 }
1083
1084 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1085 {
1086 if (unlikely(sk1 == sk2) || !sk2) {
1087 unix_state_lock(sk1);
1088 return;
1089 }
1090 if (sk1 < sk2) {
1091 unix_state_lock(sk1);
1092 unix_state_lock_nested(sk2);
1093 } else {
1094 unix_state_lock(sk2);
1095 unix_state_lock_nested(sk1);
1096 }
1097 }
1098
1099 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1100 {
1101 if (unlikely(sk1 == sk2) || !sk2) {
1102 unix_state_unlock(sk1);
1103 return;
1104 }
1105 unix_state_unlock(sk1);
1106 unix_state_unlock(sk2);
1107 }
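/*
 * Editor's note on the ordering above: if one task ran
 * lock(sk1); lock(sk2); while another ran lock(sk2); lock(sk1);, an
 * ABBA deadlock could result. Taking the lower-addressed lock first
 * imposes one global order on all tasks, which makes the nested
 * acquisition safe.
 */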
1108
1109 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1110 int alen, int flags)
1111 {
1112 struct sock *sk = sock->sk;
1113 struct net *net = sock_net(sk);
1114 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1115 struct sock *other;
1116 unsigned int hash;
1117 int err;
1118
1119 if (addr->sa_family != AF_UNSPEC) {
1120
1121 err = unix_mkname(sunaddr, alen, &hash);
1122 if (err < 0)
1123 goto out;
1124 alen = err;
1125
1126 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1127 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1128 goto out;
1129
1130 restart:
1131 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1132 if (!other)
1133 goto out;
1134
1135 unix_state_double_lock(sk, other);
1136
1137 /* Apparently VFS overslept socket death. Retry. */
1138 if (sock_flag(other, SOCK_DEAD)) {
1139 unix_state_double_unlock(sk, other);
1140 sock_put(other);
1141 goto restart;
1142 }
1143
1144 err = -EPERM;
1145 if (!unix_may_send(sk, other))
1146 goto out_unlock;
1147
1148 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1149 if (err)
1150 goto out_unlock;
1151
1152 } else {
1153 /*
1154 * 1003.1g breaking connected state with AF_UNSPEC
1155 */
1156 other = NULL;
1157 unix_state_double_lock(sk, other);
1158 }
1159
1160 /*
1161 * If it was connected, reconnect.
1162 */
1163 if (unix_peer(sk)) {
1164 struct sock *old_peer = unix_peer(sk);
1165 unix_peer(sk) = other;
1166 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1167
1168 unix_state_double_unlock(sk, other);
1169
1170 if (other != old_peer)
1171 unix_dgram_disconnected(sk, old_peer);
1172 sock_put(old_peer);
1173 } else {
1174 unix_peer(sk) = other;
1175 unix_state_double_unlock(sk, other);
1176 }
1177
1178 #ifdef CONFIG_MTK_NET_LOGGING
1179	/* other is NULL after an AF_UNSPEC disconnect; check it before dereferencing */
1180	if (other && SOCK_INODE(sock) && sunaddr && other->sk_socket && SOCK_INODE(other->sk_socket))
1181		printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]: connect [%s] other[%lu]\n",
1182		       SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
1183 #endif
1184
1185 return 0;
1186
1187 out_unlock:
1188 unix_state_double_unlock(sk, other);
1189 sock_put(other);
1190 out:
1191
1192 return err;
1193 }
1194
1195 static long unix_wait_for_peer(struct sock *other, long timeo)
1196 {
1197 struct unix_sock *u = unix_sk(other);
1198 int sched;
1199 DEFINE_WAIT(wait);
1200
1201 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1202
1203 sched = !sock_flag(other, SOCK_DEAD) &&
1204 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1205 unix_recvq_full(other);
1206
1207 unix_state_unlock(other);
1208
1209 if (sched)
1210 timeo = schedule_timeout(timeo);
1211
1212 finish_wait(&u->peer_wait, &wait);
1213 return timeo;
1214 }
1215
1216 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1217 int addr_len, int flags)
1218 {
1219 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1220 struct sock *sk = sock->sk;
1221 struct net *net = sock_net(sk);
1222 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1223 struct sock *newsk = NULL;
1224 struct sock *other = NULL;
1225 struct sk_buff *skb = NULL;
1226 unsigned int hash;
1227 int st;
1228 int err;
1229 long timeo;
1230
1231 err = unix_mkname(sunaddr, addr_len, &hash);
1232 if (err < 0)
1233 goto out;
1234 addr_len = err;
1235
1236 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1237 (err = unix_autobind(sock)) != 0)
1238 goto out;
1239
1240 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1241
1242 /* First of all allocate resources.
1243 If we will make it after state is locked,
1244 we will have to recheck all again in any case.
1245 */
1246
1247 err = -ENOMEM;
1248
1249 /* create new sock for complete connection */
1250 newsk = unix_create1(sock_net(sk), NULL);
1251 if (newsk == NULL)
1252 goto out;
1253
1254 /* Allocate skb for sending to listening sock */
1255 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1256 if (skb == NULL)
1257 goto out;
1258
1259 restart:
1260 /* Find listening sock. */
1261 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1262 if (!other)
1263 goto out;
1264
1265 /* Latch state of peer */
1266 unix_state_lock(other);
1267
1268 /* Apparently VFS overslept socket death. Retry. */
1269 if (sock_flag(other, SOCK_DEAD)) {
1270 unix_state_unlock(other);
1271 sock_put(other);
1272 goto restart;
1273 }
1274
1275 err = -ECONNREFUSED;
1276 if (other->sk_state != TCP_LISTEN)
1277 goto out_unlock;
1278 if (other->sk_shutdown & RCV_SHUTDOWN)
1279 goto out_unlock;
1280
1281 if (unix_recvq_full(other)) {
1282 err = -EAGAIN;
1283 if (!timeo)
1284 goto out_unlock;
1285
1286 timeo = unix_wait_for_peer(other, timeo);
1287
1288 err = sock_intr_errno(timeo);
1289 if (signal_pending(current))
1290 goto out;
1291 sock_put(other);
1292 goto restart;
1293 }
1294
1295 /* Latch our state.
1296
1297 This is a tricky place. We need to grab our state lock and cannot
1298 drop the lock on the peer. It is dangerous because deadlock is
1299 possible. The connect-to-self case and simultaneous
1300 attempts to connect are eliminated by checking the socket
1301 state: other is TCP_LISTEN, and if sk were TCP_LISTEN we
1302 would have checked this before attempting to grab the lock.
1303
1304 Well, and we have to recheck the state after the socket is locked.
1305 */
1306 st = sk->sk_state;
1307
1308 switch (st) {
1309 case TCP_CLOSE:
1310 /* This is ok... continue with connect */
1311 break;
1312 case TCP_ESTABLISHED:
1313 /* Socket is already connected */
1314 err = -EISCONN;
1315 goto out_unlock;
1316 default:
1317 err = -EINVAL;
1318 goto out_unlock;
1319 }
1320
1321 unix_state_lock_nested(sk);
1322
1323 if (sk->sk_state != st) {
1324 unix_state_unlock(sk);
1325 unix_state_unlock(other);
1326 sock_put(other);
1327 goto restart;
1328 }
1329
1330 err = security_unix_stream_connect(sk, other, newsk);
1331 if (err) {
1332 unix_state_unlock(sk);
1333 goto out_unlock;
1334 }
1335
1336 /* The way is open! Quickly set all the necessary fields... */
1337
1338 sock_hold(sk);
1339 unix_peer(newsk) = sk;
1340 newsk->sk_state = TCP_ESTABLISHED;
1341 newsk->sk_type = sk->sk_type;
1342 init_peercred(newsk);
1343 newu = unix_sk(newsk);
1344 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1345 otheru = unix_sk(other);
1346
1347 /* copy address information from listening to new sock*/
1348 if (otheru->addr) {
1349 atomic_inc(&otheru->addr->refcnt);
1350 newu->addr = otheru->addr;
1351 }
1352 if (otheru->path.dentry) {
1353 path_get(&otheru->path);
1354 newu->path = otheru->path;
1355 }
1356
1357 /* Set credentials */
1358 copy_peercred(sk, other);
1359
1360 sock->state = SS_CONNECTED;
1361 sk->sk_state = TCP_ESTABLISHED;
1362 sock_hold(newsk);
1363
1364 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1365 unix_peer(sk) = newsk;
1366
1367 unix_state_unlock(sk);
1368
1369 /* take ten and send info to listening sock */
1370 spin_lock(&other->sk_receive_queue.lock);
1371 __skb_queue_tail(&other->sk_receive_queue, skb);
1372 spin_unlock(&other->sk_receive_queue.lock);
1373 unix_state_unlock(other);
1374
1375 #ifdef CONFIG_MTK_NET_LOGGING
1376	if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) &&
1377	    (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL))
1378		printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu]: connect [%s] other[%lu]\n",
1379		       SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
1380 #endif
1381
1382 other->sk_data_ready(other, 0);
1383 sock_put(other);
1384
1385 return 0;
1386
1387 out_unlock:
1388 if (other)
1389 unix_state_unlock(other);
1390
1391 out:
1392 kfree_skb(skb);
1393 if (newsk)
1394 unix_release_sock(newsk, 0);
1395 if (other)
1396 sock_put(other);
1397
1398 return err;
1399 }
1400
1401 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1402 {
1403 struct sock *ska = socka->sk, *skb = sockb->sk;
1404
1405 /* Join our sockets back to back */
1406 sock_hold(ska);
1407 sock_hold(skb);
1408 unix_peer(ska) = skb;
1409 unix_peer(skb) = ska;
1410 init_peercred(ska);
1411 init_peercred(skb);
1412
1413 if (ska->sk_type != SOCK_DGRAM) {
1414 ska->sk_state = TCP_ESTABLISHED;
1415 skb->sk_state = TCP_ESTABLISHED;
1416 socka->state = SS_CONNECTED;
1417 sockb->state = SS_CONNECTED;
1418 }
1419 return 0;
1420 }
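/*
 * Userspace counterpart (editor's sketch): socketpair() yields a
 * connected, anonymous pair without any bind()/connect():
 *
 *	int sv[2];
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0)
 *		write(sv[0], "ping", 4);   // readable on sv[1]
 */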
1421
1422 static void unix_sock_inherit_flags(const struct socket *old,
1423 struct socket *new)
1424 {
1425 if (test_bit(SOCK_PASSCRED, &old->flags))
1426 set_bit(SOCK_PASSCRED, &new->flags);
1427 if (test_bit(SOCK_PASSSEC, &old->flags))
1428 set_bit(SOCK_PASSSEC, &new->flags);
1429 }
1430
1431 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1432 {
1433 struct sock *sk = sock->sk;
1434 struct sock *tsk;
1435 struct sk_buff *skb;
1436 int err;
1437
1438 err = -EOPNOTSUPP;
1439 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1440 goto out;
1441
1442 err = -EINVAL;
1443 if (sk->sk_state != TCP_LISTEN)
1444 goto out;
1445
1446 /* If socket state is TCP_LISTEN it cannot change (for now...),
1447 * so that no locks are necessary.
1448 */
1449
1450 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1451 if (!skb) {
1452 /* This means receive shutdown. */
1453 if (err == 0)
1454 err = -EINVAL;
1455 goto out;
1456 }
1457
1458 tsk = skb->sk;
1459 skb_free_datagram(sk, skb);
1460 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1461
1462 /* attach accepted sock to socket */
1463 unix_state_lock(tsk);
1464 newsock->state = SS_CONNECTED;
1465 unix_sock_inherit_flags(sock, newsock);
1466 sock_graft(tsk, newsock);
1467 unix_state_unlock(tsk);
1468
1469 return 0;
1470
1471 out:
1472
1473 return err;
1474 }
1475
1476
1477 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1478 {
1479 struct sock *sk = sock->sk;
1480 struct unix_sock *u;
1481 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1482 int err = 0;
1483
1484 if (peer) {
1485 sk = unix_peer_get(sk);
1486
1487 err = -ENOTCONN;
1488 if (!sk)
1489 goto out;
1490 err = 0;
1491 } else {
1492 sock_hold(sk);
1493 }
1494
1495 u = unix_sk(sk);
1496 unix_state_lock(sk);
1497 if (!u->addr) {
1498 sunaddr->sun_family = AF_UNIX;
1499 sunaddr->sun_path[0] = 0;
1500 *uaddr_len = sizeof(short);
1501 } else {
1502 struct unix_address *addr = u->addr;
1503
1504 *uaddr_len = addr->len;
1505 memcpy(sunaddr, addr->name, *uaddr_len);
1506 }
1507 unix_state_unlock(sk);
1508 sock_put(sk);
1509 out:
1510 return err;
1511 }
1512
1513 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1514 {
1515 int i;
1516
1517 scm->fp = UNIXCB(skb).fp;
1518 UNIXCB(skb).fp = NULL;
1519
1520 for (i = scm->fp->count-1; i >= 0; i--)
1521 unix_notinflight(scm->fp->user, scm->fp->fp[i]);
1522 }
1523
1524 static void unix_destruct_scm(struct sk_buff *skb)
1525 {
1526 struct scm_cookie scm;
1527 memset(&scm, 0, sizeof(scm));
1528 scm.pid = UNIXCB(skb).pid;
1529 if (UNIXCB(skb).fp)
1530 unix_detach_fds(&scm, skb);
1531
1532 /* Alas, it calls VFS */
1533 /* So fscking what? fput() had been SMP-safe since the last Summer */
1534 scm_destroy(&scm);
1535 sock_wfree(skb);
1536 }
1537
1538 /*
1539 * The "user->unix_inflight" variable is protected by the garbage
1540 * collection lock, and we just read it locklessly here. If you go
1541 * over the limit, there might be a tiny race in actually noticing
1542 * it across threads. Tough.
1543 */
1544 static inline bool too_many_unix_fds(struct task_struct *p)
1545 {
1546 struct user_struct *user = current_user();
1547
1548 if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
1549 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1550 return false;
1551 }
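/*
 * Editor's note: e.g. with RLIMIT_NOFILE == 1024, once a user already
 * has more than 1024 descriptors in flight over AF_UNIX sockets,
 * further SCM_RIGHTS sends fail with -ETOOMANYREFS unless the caller
 * has CAP_SYS_RESOURCE or CAP_SYS_ADMIN.
 */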
1552
1553 #define MAX_RECURSION_LEVEL 4
1554
1555 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1556 {
1557 int i;
1558 unsigned char max_level = 0;
1559 int unix_sock_count = 0;
1560
1561 if (too_many_unix_fds(current))
1562 return -ETOOMANYREFS;
1563
1564 for (i = scm->fp->count - 1; i >= 0; i--) {
1565 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1566
1567 if (sk) {
1568 unix_sock_count++;
1569 max_level = max(max_level,
1570 unix_sk(sk)->recursion_level);
1571 }
1572 }
1573 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1574 return -ETOOMANYREFS;
1575
1576 /*
1577 * Need to duplicate file references for the sake of garbage
1578 * collection. Otherwise a socket in the fps might become a
1579 * candidate for GC while the skb is not yet queued.
1580 */
1581 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1582 if (!UNIXCB(skb).fp)
1583 return -ENOMEM;
1584
1585 for (i = scm->fp->count - 1; i >= 0; i--)
1586 unix_inflight(scm->fp->user, scm->fp->fp[i]);
1587 return max_level;
1588 }
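/*
 * The sending side of what unix_attach_fds() handles, seen from
 * userspace (editor's sketch; sock_fd and fd_to_pass are assumed
 * variables, error handling elided):
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = { 0 };
 *	struct iovec iov = { .iov_base = (void *)"x", .iov_len = 1 };
 *	msg.msg_iov = &iov;  msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;  msg.msg_controllen = sizeof(cbuf);
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	c->cmsg_level = SOL_SOCKET;  c->cmsg_type = SCM_RIGHTS;
 *	c->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 *
 * MAX_RECURSION_LEVEL bounds how deeply AF_UNIX sockets may be passed
 * over other AF_UNIX sockets this way.
 */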
1589
1590 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1591 {
1592 int err = 0;
1593
1594 UNIXCB(skb).pid = get_pid(scm->pid);
1595 UNIXCB(skb).uid = scm->creds.uid;
1596 UNIXCB(skb).gid = scm->creds.gid;
1597 UNIXCB(skb).fp = NULL;
1598 if (scm->fp && send_fds)
1599 err = unix_attach_fds(scm, skb);
1600
1601 skb->destructor = unix_destruct_scm;
1602 return err;
1603 }
1604
1605 /*
1606 * Some apps rely on write() giving SCM_CREDENTIALS
1607 * We include credentials if source or destination socket
1608 * asserted SOCK_PASSCRED.
1609 */
1610 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1611 const struct sock *other)
1612 {
1613 if (UNIXCB(skb).pid)
1614 return;
1615 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1616 !other->sk_socket ||
1617 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1618 UNIXCB(skb).pid = get_pid(task_tgid(current));
1619 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1620 }
1621 }
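/*
 * Userspace view (editor's sketch): a receiver that sets SO_PASSCRED
 * gets the sender's credentials even when the sender attached none,
 * thanks to maybe_add_creds(). Error handling and the data iovec are
 * elided:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
 *	...
 *	char cbuf[CMSG_SPACE(sizeof(struct ucred))];
 *	struct msghdr msg = { 0 };
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	recvmsg(fd, &msg, 0);
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	if (c && c->cmsg_type == SCM_CREDENTIALS) {
 *		struct ucred *uc = (struct ucred *)CMSG_DATA(c);
 *		// uc->pid, uc->uid, uc->gid describe the sender
 *	}
 */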
1622
1623 /*
1624 * Send AF_UNIX data.
1625 */
1626
1627 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1628 struct msghdr *msg, size_t len)
1629 {
1630 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1631 struct sock *sk = sock->sk;
1632 struct net *net = sock_net(sk);
1633 struct unix_sock *u = unix_sk(sk);
1634 struct sockaddr_un *sunaddr = msg->msg_name;
1635 struct sock *other = NULL;
1636 int namelen = 0; /* fake GCC */
1637 int err;
1638 unsigned int hash;
1639 struct sk_buff *skb;
1640 long timeo;
1641 struct scm_cookie tmp_scm;
1642 int max_level;
1643 int data_len = 0;
1644 int sk_locked;
1645
1646 if (NULL == siocb->scm)
1647 siocb->scm = &tmp_scm;
1648 wait_for_unix_gc();
1649 err = scm_send(sock, msg, siocb->scm, false);
1650 if (err < 0)
1651 return err;
1652
1653 err = -EOPNOTSUPP;
1654 if (msg->msg_flags&MSG_OOB)
1655 goto out;
1656
1657 if (msg->msg_namelen) {
1658 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1659 if (err < 0)
1660 goto out;
1661 namelen = err;
1662 } else {
1663 sunaddr = NULL;
1664 err = -ENOTCONN;
1665 other = unix_peer_get(sk);
1666 if (!other)
1667 goto out;
1668 }
1669
1670 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1671 && (err = unix_autobind(sock)) != 0)
1672 goto out;
1673
1674 err = -EMSGSIZE;
1675 if (len > sk->sk_sndbuf - 32)
1676 goto out;
1677
1678 if (len > SKB_MAX_ALLOC)
1679 data_len = min_t(size_t,
1680 len - SKB_MAX_ALLOC,
1681 MAX_SKB_FRAGS * PAGE_SIZE);
1682
1683 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1684 msg->msg_flags & MSG_DONTWAIT, &err);
1685 if (skb == NULL)
1686 goto out;
1687
1688 err = unix_scm_to_skb(siocb->scm, skb, true);
1689 if (err < 0)
1690 goto out_free;
1691 max_level = err + 1;
1692 unix_get_secdata(siocb->scm, skb);
1693
1694 skb_put(skb, len - data_len);
1695 skb->data_len = data_len;
1696 skb->len = len;
1697 err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
1698 if (err)
1699 goto out_free;
1700
1701 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1702
1703 restart:
1704 if (!other) {
1705 err = -ECONNRESET;
1706 if (sunaddr == NULL)
1707 goto out_free;
1708
1709 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1710 hash, &err);
1711 if (other == NULL)
1712 goto out_free;
1713 }
1714
1715 if (sk_filter(other, skb) < 0) {
1716 /* Toss the packet but do not return any error to the sender */
1717 err = len;
1718 goto out_free;
1719 }
1720
1721 sk_locked = 0;
1722 unix_state_lock(other);
1723 restart_locked:
1724 err = -EPERM;
1725 if (!unix_may_send(sk, other))
1726 goto out_unlock;
1727
1728 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1729 /*
1730 * Check with 1003.1g - what should
1731 * datagram error
1732 */
1733 unix_state_unlock(other);
1734 sock_put(other);
1735
1736 if (!sk_locked)
1737 unix_state_lock(sk);
1738
1739 err = 0;
1740 if (unix_peer(sk) == other) {
1741 unix_peer(sk) = NULL;
1742 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1743
1744 unix_state_unlock(sk);
1745
1746 unix_dgram_disconnected(sk, other);
1747 sock_put(other);
1748 err = -ECONNREFUSED;
1749 } else {
1750 unix_state_unlock(sk);
1751 }
1752
1753 other = NULL;
1754 if (err)
1755 goto out_free;
1756 goto restart;
1757 }
1758
1759 err = -EPIPE;
1760 if (other->sk_shutdown & RCV_SHUTDOWN)
1761 goto out_unlock;
1762
1763 if (sk->sk_type != SOCK_SEQPACKET) {
1764 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1765 if (err)
1766 goto out_unlock;
1767 }
1768
1769 /* other == sk && unix_peer(other) != sk if
1770 * - unix_peer(sk) == NULL, destination address bound to sk
1771 * - unix_peer(sk) == sk by time of get but disconnected before lock
1772 */
1773 if (other != sk &&
1774 unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1775 if (timeo) {
1776 timeo = unix_wait_for_peer(other, timeo);
1777
1778 err = sock_intr_errno(timeo);
1779 if (signal_pending(current))
1780 goto out_free;
1781
1782 goto restart;
1783 }
1784
1785 if (!sk_locked) {
1786 unix_state_unlock(other);
1787 unix_state_double_lock(sk, other);
1788 }
1789
1790 if (unix_peer(sk) != other ||
1791 unix_dgram_peer_wake_me(sk, other)) {
1792 err = -EAGAIN;
1793 sk_locked = 1;
1794 goto out_unlock;
1795 }
1796
1797 if (!sk_locked) {
1798 sk_locked = 1;
1799 goto restart_locked;
1800 }
1801 }
1802
1803 if (unlikely(sk_locked))
1804 unix_state_unlock(sk);
1805
1806 if (sock_flag(other, SOCK_RCVTSTAMP))
1807 __net_timestamp(skb);
1808 maybe_add_creds(skb, sock, other);
1809 skb_queue_tail(&other->sk_receive_queue, skb);
1810 if (max_level > unix_sk(other)->recursion_level)
1811 unix_sk(other)->recursion_level = max_level;
1812 unix_state_unlock(other);
1813 other->sk_data_ready(other, len);
1814 sock_put(other);
1815 scm_destroy(siocb->scm);
1816
1817 return len;
1818
1819 out_unlock:
1820 if (sk_locked)
1821 unix_state_unlock(sk);
1822 unix_state_unlock(other);
1823 out_free:
1824 kfree_skb(skb);
1825 out:
1826 if (other)
1827 sock_put(other);
1828 scm_destroy(siocb->scm);
1829
1830 return err;
1831 }
1832
1833
1834 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1835 struct msghdr *msg, size_t len)
1836 {
1837 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1838 struct sock *sk = sock->sk;
1839 struct sock *other = NULL;
1840 int err, size;
1841 struct sk_buff *skb;
1842 int sent = 0;
1843 struct scm_cookie tmp_scm;
1844 bool fds_sent = false;
1845 int max_level;
1846
1847 if (NULL == siocb->scm)
1848 siocb->scm = &tmp_scm;
1849
1850 wait_for_unix_gc();
1851 err = scm_send(sock, msg, siocb->scm, false);
1852 if (err < 0)
1853 return err;
1854
1855 err = -EOPNOTSUPP;
1856 if (msg->msg_flags&MSG_OOB)
1857 goto out_err;
1858
1859 if (msg->msg_namelen) {
1860 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1861 goto out_err;
1862 } else {
1863 err = -ENOTCONN;
1864 other = unix_peer(sk);
1865 if (!other)
1866 goto out_err;
1867 }
1868
1869 if (sk->sk_shutdown & SEND_SHUTDOWN)
1870 goto pipe_err;
1871
1872 while (sent < len) {
1873 /*
1874 * Optimisation for the fact that under 0.01% of X
1875 * messages typically need breaking up.
1876 */
1877
1878 size = len-sent;
1879
1880 /* Keep two messages in the pipe so it schedules better */
1881 if (size > ((sk->sk_sndbuf >> 1) - 64))
1882 size = (sk->sk_sndbuf >> 1) - 64;
1883
1884 if (size > SKB_MAX_ALLOC)
1885 size = SKB_MAX_ALLOC;
1886
1887 /*
1888 * Grab a buffer
1889 */
1890
1891 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1892 &err);
1893
1894
1895 if (skb == NULL)
1896 goto out_err;
1897
1898 /*
1899 * If you pass two values to the sock_alloc_send_skb
1900 * it tries to grab the large buffer with GFP_NOFS
1901 * (which can fail easily), and if it fails grab the
1902 * fallback size buffer which is under a page and will
1903 * succeed. [Alan]
1904 */
1905 size = min_t(int, size, skb_tailroom(skb));
1906
1907
1908 /* Only send the fds in the first buffer */
1909 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1910 if (err < 0) {
1911 kfree_skb(skb);
1912 goto out_err;
1913 }
1914 max_level = err + 1;
1915 fds_sent = true;
1916
1917 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1918 if (err) {
1919 kfree_skb(skb);
1920 goto out_err;
1921 }
1922
1923 unix_state_lock(other);
1924
1925		if (sock_flag(other, SOCK_DEAD) ||
1926		    (other->sk_shutdown & RCV_SHUTDOWN)) {
1927 #ifdef CONFIG_MTK_NET_LOGGING
1928			if (other->sk_socket) {
1929				if (sk->sk_socket)
1930					printk(KERN_INFO "[mtk_net][unix]: sendmsg[%lu:%lu]:peer close\n",
1931					       SOCK_INODE(sk->sk_socket)->i_ino,
1932					       SOCK_INODE(other->sk_socket)->i_ino);
1933				else
1934					printk(KERN_INFO "[mtk_net][unix]: sendmsg[null:%lu]:peer close\n",
1935					       SOCK_INODE(other->sk_socket)->i_ino);
1936			} else {
1937				printk(KERN_INFO "[mtk_net][unix]: sendmsg:peer close\n");
1938			}
1939 #endif
1940			goto pipe_err_free;
1941		}
1954
1955 maybe_add_creds(skb, sock, other);
1956 skb_queue_tail(&other->sk_receive_queue, skb);
1957 if (max_level > unix_sk(other)->recursion_level)
1958 unix_sk(other)->recursion_level = max_level;
1959 unix_state_unlock(other);
1960 other->sk_data_ready(other, size);
1961 sent += size;
1962 }
1963
1964 scm_destroy(siocb->scm);
1965 siocb->scm = NULL;
1966
1967 return sent;
1968
1969 pipe_err_free:
1970 unix_state_unlock(other);
1971 kfree_skb(skb);
1972 pipe_err:
1973 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1974 send_sig(SIGPIPE, current, 0);
1975 err = -EPIPE;
1976 out_err:
1977 scm_destroy(siocb->scm);
1978 siocb->scm = NULL;
1979
1980 return sent ? : err;
1981 }
1982
1983 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1984 struct msghdr *msg, size_t len)
1985 {
1986 int err;
1987 struct sock *sk = sock->sk;
1988
1989 err = sock_error(sk);
1990 if (err)
1991 return err;
1992
1993 if (sk->sk_state != TCP_ESTABLISHED)
1994 return -ENOTCONN;
1995
1996 if (msg->msg_namelen)
1997 msg->msg_namelen = 0;
1998
1999 return unix_dgram_sendmsg(kiocb, sock, msg, len);
2000 }
2001
2002 static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
2003 struct msghdr *msg, size_t size,
2004 int flags)
2005 {
2006 struct sock *sk = sock->sk;
2007
2008 if (sk->sk_state != TCP_ESTABLISHED)
2009 return -ENOTCONN;
2010
2011 return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
2012 }
2013
2014 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2015 {
2016 struct unix_sock *u = unix_sk(sk);
2017
2018 if (u->addr) {
2019 msg->msg_namelen = u->addr->len;
2020 memcpy(msg->msg_name, u->addr->name, u->addr->len);
2021 }
2022 }
2023
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;
	int peeked, skip;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	err = mutex_lock_interruptible(&u->readlock);
	if (unlikely(err)) {
		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
		goto out;
	}

	skip = sk_peek_offset(sk, flags);

	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/
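
		/* A userspace-visible consequence of the "clone fds" choice
		 * (illustrative sketch): each MSG_PEEK of a message carrying
		 * SCM_RIGHTS installs a fresh copy of the descriptors in the
		 * caller, who owns every set and must close them:
		 *
		 *	recvmsg(fd, &msg, MSG_PEEK);	// new fds installed
		 *	recvmsg(fd, &msg, MSG_PEEK);	// another new set
		 *	recvmsg(fd, &msg, 0);		// final set; message
		 *					// is now consumed
		 */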

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}

/*
 *	Sleep until more data has arrived. But check for races..
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (skb_peek_tail(&sk->sk_receive_queue) != last ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = freezable_schedule_timeout(timeo);
		unix_state_lock(sk);

		if (sock_flag(sk, SOCK_DEAD))
			break;

		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int noblock = flags & MSG_DONTWAIT;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;
	int skip;
	struct sock *other = unix_peer(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_toiovec()
	 */
	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	mutex_lock(&u->readlock);

	do {
		int chunk;
		struct sk_buff *skb, *last;

		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		last = skb = skb_peek(&sk->sk_receive_queue);
again:
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				if (sk->sk_socket) {
					if (other && other->sk_socket) {
#ifdef CONFIG_MTK_NET_LOGGING
						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]: exit read due to peer shutdown\n",
						       SOCK_INODE(sk->sk_socket)->i_ino,
						       SOCK_INODE(other->sk_socket)->i_ino);
#endif
					} else {
#ifdef CONFIG_MTK_NET_LOGGING
						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]: exit read due to peer shutdown\n",
						       SOCK_INODE(sk->sk_socket)->i_ino);
#endif
					}
				} else {
#ifdef CONFIG_MTK_NET_LOGGING
					printk(KERN_INFO "[mtk_net][unix]: recvmsg: exit read due to peer shutdown\n");
#endif
				}
				goto unlock;
			}
			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo, last);
			if (!timeo) {
				if (sk->sk_socket) {
					if (other && other->sk_socket) {
#ifdef CONFIG_MTK_NET_LOGGING
						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]: exit read due to timeout\n",
						       SOCK_INODE(sk->sk_socket)->i_ino,
						       SOCK_INODE(other->sk_socket)->i_ino);
#endif
					} else {
#ifdef CONFIG_MTK_NET_LOGGING
						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]: exit read due to timeout\n",
						       SOCK_INODE(sk->sk_socket)->i_ino);
#endif
					}
				} else {
#ifdef CONFIG_MTK_NET_LOGGING
					printk(KERN_INFO "[mtk_net][unix]: recvmsg: exit read due to timeout\n");
#endif
				}
			}

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			mutex_lock(&u->readlock);
			continue;
unlock:
			unix_state_unlock(sk);
			break;
		}

		skip = sk_peek_offset(sk, flags);
		while (skip >= skb->len) {
			skip -= skb->len;
			last = skb;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
			    !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
			    !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len - skip, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			if (skb->len)
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			if (skip) {
				sk_peek_offset_fwd(sk, chunk);
				skip -= chunk;
			}

			if (UNIXCB(skb).fp)
				break;

			last = skb;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}

static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
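	/* Worked example: shutdown(fd, SHUT_WR) arrives with mode == 1,
	 * which ++mode turns into 2 == SEND_SHUTDOWN; SHUT_RDWR (2)
	 * becomes 3 == SHUTDOWN_MASK == RCV_SHUTDOWN | SEND_SHUTDOWN.
	 */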
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}

long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += skb->len;
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
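
/*
 * Userspace sketch of the two requests handled above (illustrative only;
 * SIOCINQ and SIOCOUTQ come from <linux/sockios.h>):
 *
 *	int unread, unsent;
 *
 *	ioctl(fd, SIOCINQ, &unread);	// bytes queued for reading
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes still in the send queue
 *
 * For SOCK_STREAM/SOCK_SEQPACKET, SIOCINQ counts the whole receive
 * queue; for SOCK_DGRAM it reports only the first pending datagram, per
 * unix_inq_len() above.  Note the type asymmetry: the helpers return
 * long, but the result is stored through an int __user pointer,
 * matching the historical ioctl ABI.
 */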

static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
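	/* Illustrative consequence: once the peer has closed or shut down,
	 * poll() reports POLLOUT together with POLLHUP, so a writer blocked
	 * in poll() wakes up and its next write() fails with EPIPE (or
	 * raises SIGPIPE) instead of hanging:
	 *
	 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	 *	poll(&pfd, 1, -1);	// revents gets POLLOUT|POLLHUP set
	 */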
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
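
/*
 * The seq iterator packs (bucket, offset) into a single loff_t: the
 * bucket index lives in the high bits and the in-bucket offset in the
 * low BUCKET_SPACE bits.  For example, on a 64-bit kernel with
 * UNIX_HASH_BITS == 8, BUCKET_SPACE is 64 - 9 - 1 = 54, so:
 *
 *	loff_t pos = set_bucket_offset(3, 5);	// 5th socket of bucket 3
 *	get_bucket(pos);			// == 3
 *	get_offset(pos);			// == 5
 */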

static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	struct sock *sk;
	unsigned long count = 0;

	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
		if (sock_net(sk) != seq_file_net(seq))
			continue;
		if (++count == offset)
			break;
	}

	return sk;
}

static struct sock *unix_next_socket(struct seq_file *seq,
				     struct sock *sk,
				     loff_t *pos)
{
	unsigned long bucket;

	while (sk > (struct sock *)SEQ_START_TOKEN) {
		sk = sk_next(sk);
		if (!sk)
			goto next_bucket;
		if (sock_net(sk) == seq_file_net(seq))
			return sk;
	}

	do {
		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

next_bucket:
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < ARRAY_SIZE(unix_socket_table));

	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return unix_next_socket(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start = unix_seq_start,
	.next  = unix_seq_next,
	.stop  = unix_seq_stop,
	.show  = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};

static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);