1 /*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko Eißfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid the huge amount
38 * of socks hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lots of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
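
/*
 * Illustrative userspace sketch (not part of this file) of the two
 * binding styles described above; the names used are examples only.
 * An abstract name starts with a zero byte and is not NUL-terminated,
 * so the passed address length must count exactly the bytes used:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	strcpy(a.sun_path, "/tmp/demo.sock");		// FS-based name
 *	bind(fd1, (struct sockaddr *)&a, sizeof(a));
 *
 *	memcpy(a.sun_path, "\0demo", 5);		// abstract name
 *	bind(fd2, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 5);
 */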
82
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/signal.h>
86 #include <linux/sched.h>
87 #include <linux/errno.h>
88 #include <linux/string.h>
89 #include <linux/stat.h>
90 #include <linux/dcache.h>
91 #include <linux/namei.h>
92 #include <linux/socket.h>
93 #include <linux/un.h>
94 #include <linux/fcntl.h>
95 #include <linux/termios.h>
96 #include <linux/sockios.h>
97 #include <linux/net.h>
98 #include <linux/in.h>
99 #include <linux/fs.h>
100 #include <linux/slab.h>
101 #include <asm/uaccess.h>
102 #include <linux/skbuff.h>
103 #include <linux/netdevice.h>
104 #include <net/net_namespace.h>
105 #include <net/sock.h>
106 #include <net/tcp_states.h>
107 #include <net/af_unix.h>
108 #include <linux/proc_fs.h>
109 #include <linux/seq_file.h>
110 #include <net/scm.h>
111 #include <linux/init.h>
112 #include <linux/poll.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/mount.h>
115 #include <net/checksum.h>
116 #include <linux/security.h>
117 #include <linux/freezer.h>
118
119
120 #include <linux/uio.h>
121 #include <linux/blkdev.h>
122 #include <linux/compat.h>
123 #include <linux/rtc.h>
124 #include <asm/kmap_types.h>
125 #include <linux/device.h>
126
127
128 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
129 EXPORT_SYMBOL_GPL(unix_socket_table);
130 DEFINE_SPINLOCK(unix_table_lock);
131 EXPORT_SYMBOL_GPL(unix_table_lock);
132 static atomic_long_t unix_nr_socks;
133
134
135 static struct hlist_head *unix_sockets_unbound(void *addr)
136 {
137 unsigned long hash = (unsigned long)addr;
138
139 hash ^= hash >> 16;
140 hash ^= hash >> 8;
141 hash %= UNIX_HASH_SIZE;
142 return &unix_socket_table[UNIX_HASH_SIZE + hash];
143 }
144
145 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
146
147
148 /* For AEE interface */
149 #define __UNIX_SOCKET_OUTPUT_BUF_SIZE__ 3500
150 static struct proc_dir_entry *gunix_socket_track_aee_entry;
151 #define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
152 #define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
153
154 static volatile unsigned int unix_sock_track_stop_flag;
155 #define unix_peer(sk) (unix_sk(sk)->peer)
156
157
158 #ifdef CONFIG_SECURITY_NETWORK
159 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
160 {
161 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
162 }
163
164 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
165 {
166 scm->secid = *UNIXSID(skb);
167 }
168 #else
169 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
170 { }
171
172 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
173 { }
174 #endif /* CONFIG_SECURITY_NETWORK */
175
176 /*
177 * SMP locking strategy:
178 * hash table is protected with spinlock unix_table_lock
179 * each socket state is protected by separate spin lock.
180 */
181
182 static inline unsigned int unix_hash_fold(__wsum n)
183 {
184 unsigned int hash = (__force unsigned int)csum_fold(n);
185
186 hash ^= hash>>8;
187 return hash&(UNIX_HASH_SIZE-1);
188 }
189
190
191
192 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
193 {
194 return unix_peer(osk) == sk;
195 }
196
197 static inline int unix_may_send(struct sock *sk, struct sock *osk)
198 {
199 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
200 }
201
202 static inline int unix_recvq_full(struct sock const *sk)
203 {
204 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
205 }
206
207 struct sock *unix_peer_get(struct sock *s)
208 {
209 struct sock *peer;
210
211 unix_state_lock(s);
212 peer = unix_peer(s);
213 if (peer)
214 sock_hold(peer);
215 unix_state_unlock(s);
216 return peer;
217 }
218 EXPORT_SYMBOL_GPL(unix_peer_get);
219
220 static inline void unix_release_addr(struct unix_address *addr)
221 {
222 if (atomic_dec_and_test(&addr->refcnt))
223 kfree(addr);
224 }
225
226 /*
227 * Check unix socket name:
228 * - should not be zero length.
229 * - if it does not start with a zero byte, it must be NUL-terminated (FS object)
230 * - if it starts with a zero byte, it is an abstract name.
231 */
232
233 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
234 {
235 if (len <= sizeof(short) || len > sizeof(*sunaddr))
236 return -EINVAL;
237 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
238 return -EINVAL;
239 if (sunaddr->sun_path[0]) {
240 /*
241 * This may look like an off by one error but it is a bit more
242 * subtle. 108 is the longest valid AF_UNIX path for a binding.
243 * sun_path[108] doesn't as such exist. However in kernel space
244 * we are guaranteed that it is a valid memory location in our
245 * kernel address buffer.
246 */
247 ((char *)sunaddr)[len] = 0;
248 len = strlen(sunaddr->sun_path)+1+sizeof(short);
249 return len;
250 }
251
252 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
253 return len;
254 }
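
/*
 * For illustration (values assumed, not taken from a real trace):
 * binding "/tmp/x" with any addr_len covering the string makes
 * unix_mkname() return strlen("/tmp/x") + 1 + sizeof(short), i.e. the
 * family field plus the NUL-terminated path, while an abstract name
 * such as "\0x" keeps the caller-supplied length and only has its
 * checksum-based hash computed.
 */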
255
256 static void __unix_remove_socket(struct sock *sk)
257 {
258 sk_del_node_init(sk);
259 }
260
261 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
262 {
263 WARN_ON(!sk_unhashed(sk));
264 sk_add_node(sk, list);
265 }
266
267 static inline void unix_remove_socket(struct sock *sk)
268 {
269 spin_lock(&unix_table_lock);
270 __unix_remove_socket(sk);
271 spin_unlock(&unix_table_lock);
272 }
273
274 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
275 {
276 spin_lock(&unix_table_lock);
277 __unix_insert_socket(list, sk);
278 spin_unlock(&unix_table_lock);
279 }
280
281 static struct sock *__unix_find_socket_byname(struct net *net,
282 struct sockaddr_un *sunname,
283 int len, int type, unsigned int hash)
284 {
285 struct sock *s;
286
287 sk_for_each(s, &unix_socket_table[hash ^ type]) {
288 struct unix_sock *u = unix_sk(s);
289
290 if (!net_eq(sock_net(s), net))
291 continue;
292
293 if (u->addr->len == len &&
294 !memcmp(u->addr->name, sunname, len))
295 goto found;
296 }
297 s = NULL;
298 found:
299 return s;
300 }
301
302 static inline struct sock *unix_find_socket_byname(struct net *net,
303 struct sockaddr_un *sunname,
304 int len, int type,
305 unsigned int hash)
306 {
307 struct sock *s;
308
309 spin_lock(&unix_table_lock);
310 s = __unix_find_socket_byname(net, sunname, len, type, hash);
311 if (s)
312 sock_hold(s);
313 spin_unlock(&unix_table_lock);
314 return s;
315 }
316
317 static struct sock *unix_find_socket_byinode(struct inode *i)
318 {
319 struct sock *s;
320
321 spin_lock(&unix_table_lock);
322 sk_for_each(s,
323 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
324 struct dentry *dentry = unix_sk(s)->path.dentry;
325
326 if (dentry && dentry->d_inode == i) {
327 sock_hold(s);
328 goto found;
329 }
330 }
331 s = NULL;
332 found:
333 spin_unlock(&unix_table_lock);
334 return s;
335 }
336
337 /* Support code for asymmetrically connected dgram sockets
338 *
339 * If a datagram socket is connected to a socket not itself connected
340 * to the first socket (eg, /dev/log), clients may only enqueue more
341 * messages if the present receive queue of the server socket is not
342 * "too large". This means there's a second writeability condition
343 * poll and sendmsg need to test. The dgram recv code will do a wake
344 * up on the peer_wait wait queue of a socket upon reception of a
345 * datagram which needs to be propagated to sleeping would-be writers
346 * since these might not have sent anything so far. This can't be
347 * accomplished via poll_wait because the lifetime of the server
348 * socket might be less than that of its clients if these break their
349 * association with it or if the server socket is closed while clients
350 * are still connected to it and there's no way to inform "a polling
351 * implementation" that it should let go of a certain wait queue
352 *
353 * In order to propagate a wake up, a wait_queue_t of the client
354 * socket is enqueued on the peer_wait queue of the server socket
355 * whose wake function does a wake_up on the ordinary client socket
356 * wait queue. This connection is established whenever a write (or
357 * poll for write) hits the flow control condition and is broken when the
358 * association to the server socket is dissolved or after a wake up
359 * was relayed.
360 */
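
/*
 * Userspace sketch of the scenario described above (socket names are
 * illustrative): a dgram client connected to a busy, non-reciprocating
 * server such as a logging daemon parks in poll() and relies on the
 * relay below for its POLLOUT wakeup once the server drains its queue:
 *
 *	connect(cfd, (struct sockaddr *)&srv, srv_len);
 *	struct pollfd pfd = { .fd = cfd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);	// may sleep until peer_wait relays a wakeup
 *	send(cfd, buf, len, 0);
 */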
361
362 static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
363 void *key)
364 {
365 struct unix_sock *u;
366 wait_queue_head_t *u_sleep;
367
368 u = container_of(q, struct unix_sock, peer_wake);
369
370 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
371 q);
372 u->peer_wake.private = NULL;
373
374 /* relaying can only happen while the wq still exists */
375 u_sleep = sk_sleep(&u->sk);
376 if (u_sleep)
377 wake_up_interruptible_poll(u_sleep, key);
378
379 return 0;
380 }
381
382 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
383 {
384 struct unix_sock *u, *u_other;
385 int rc;
386
387 u = unix_sk(sk);
388 u_other = unix_sk(other);
389 rc = 0;
390 spin_lock(&u_other->peer_wait.lock);
391
392 if (!u->peer_wake.private) {
393 u->peer_wake.private = other;
394 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
395
396 rc = 1;
397 }
398
399 spin_unlock(&u_other->peer_wait.lock);
400 return rc;
401 }
402
403 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
404 struct sock *other)
405 {
406 struct unix_sock *u, *u_other;
407
408 u = unix_sk(sk);
409 u_other = unix_sk(other);
410 spin_lock(&u_other->peer_wait.lock);
411
412 if (u->peer_wake.private == other) {
413 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
414 u->peer_wake.private = NULL;
415 }
416
417 spin_unlock(&u_other->peer_wait.lock);
418 }
419
420 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
421 struct sock *other)
422 {
423 unix_dgram_peer_wake_disconnect(sk, other);
424 wake_up_interruptible_poll(sk_sleep(sk),
425 POLLOUT |
426 POLLWRNORM |
427 POLLWRBAND);
428 }
429
430 /* preconditions:
431 * - unix_peer(sk) == other
432 * - association is stable
433 */
434 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
435 {
436 int connected;
437
438 connected = unix_dgram_peer_wake_connect(sk, other);
439
440 if (unix_recvq_full(other))
441 return 1;
442
443 if (connected)
444 unix_dgram_peer_wake_disconnect(sk, other);
445
446 return 0;
447 }
448
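/* A socket is considered writable while its queued write memory stays
 * at or below one quarter of sk_sndbuf (the "<< 2" below); e.g. with an
 * assumed 64 KiB sndbuf, up to 16 KiB may be in flight before POLLOUT
 * is withheld.
 */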
449 static inline int unix_writable(struct sock *sk)
450 {
451 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
452 }
453
454 static void unix_write_space(struct sock *sk)
455 {
456 struct socket_wq *wq;
457
458 rcu_read_lock();
459 if (unix_writable(sk)) {
460 wq = rcu_dereference(sk->sk_wq);
461 if (wq_has_sleeper(wq))
462 wake_up_interruptible_sync_poll(&wq->wait,
463 POLLOUT | POLLWRNORM | POLLWRBAND);
464 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
465 }
466 rcu_read_unlock();
467 }
468
469 /* When dgram socket disconnects (or changes its peer), we clear its receive
470 * queue of packets arrived from previous peer. First, it allows to do
471 * flow control based only on wmem_alloc; second, sk connected to peer
472 * may receive messages only from that peer. */
473 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
474 {
475 if (!skb_queue_empty(&sk->sk_receive_queue)) {
476 skb_queue_purge(&sk->sk_receive_queue);
477 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
478
479 /* If one link of bidirectional dgram pipe is disconnected,
480 * we signal error. Messages are lost. Do not make this,
481 * when peer was not connected to us.
482 */
483 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
484 other->sk_err = ECONNRESET;
485 other->sk_error_report(other);
486 }
487 }
488 }
489
490 static void unix_sock_destructor(struct sock *sk)
491 {
492 struct unix_sock *u = unix_sk(sk);
493
494 skb_queue_purge(&sk->sk_receive_queue);
495
496 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
497 WARN_ON(!sk_unhashed(sk));
498 WARN_ON(sk->sk_socket);
499 if (!sock_flag(sk, SOCK_DEAD)) {
500 #ifdef CONFIG_MTK_NET_LOGGING
501 printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
502 #endif
503 return;
504 }
505
506 if (u->addr)
507 unix_release_addr(u->addr);
508
509 atomic_long_dec(&unix_nr_socks);
510 local_bh_disable();
511 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
512 local_bh_enable();
513 #ifdef UNIX_REFCNT_DEBUG
514 printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
515 atomic_long_read(&unix_nr_socks));
516 #endif
517 }
518
519 static void unix_release_sock(struct sock *sk, int embrion)
520 {
521 struct unix_sock *u = unix_sk(sk);
522 struct path path;
523 struct sock *skpair;
524 struct sk_buff *skb;
525 int state;
526
527 unix_remove_socket(sk);
528
529 /* Clear state */
530 unix_state_lock(sk);
531 sock_orphan(sk);
532 sk->sk_shutdown = SHUTDOWN_MASK;
533 path = u->path;
534 u->path.dentry = NULL;
535 u->path.mnt = NULL;
536 state = sk->sk_state;
537 sk->sk_state = TCP_CLOSE;
538 unix_state_unlock(sk);
539
540 wake_up_interruptible_all(&u->peer_wait);
541
542 skpair = unix_peer(sk);
543
544 if (skpair != NULL) {
545 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
546 unix_state_lock(skpair);
547 /* No more writes */
548 skpair->sk_shutdown = SHUTDOWN_MASK;
549 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
550 skpair->sk_err = ECONNRESET;
551 unix_state_unlock(skpair);
552 skpair->sk_state_change(skpair);
553 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
554 }
555
556 unix_dgram_peer_wake_disconnect(sk, skpair);
557 sock_put(skpair); /* It may now die */
558 unix_peer(sk) = NULL;
559 }
560
561 /* Try to flush out this socket. Throw out buffers at least */
562
563 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
564 if (state == TCP_LISTEN)
565 unix_release_sock(skb->sk, 1);
566 /* passed fds are erased in the kfree_skb hook */
567 kfree_skb(skb);
568 }
569
570 if (path.dentry)
571 path_put(&path);
572
573 sock_put(sk);
574
575 /* ---- Socket is dead now and most probably destroyed ---- */
576
577 /*
578 * Fixme: BSD difference: In BSD all sockets connected to us get
579 * ECONNRESET and we die on the spot. In Linux we behave
580 * like files and pipes do and wait for the last
581 * dereference.
582 *
583 * Can't we simply set sock->err?
584 *
585 * What the above comment does talk about? --ANK(980817)
586 */
587
588 if (unix_tot_inflight)
589 unix_gc(); /* Garbage collect fds */
590 }
591
592 static void init_peercred(struct sock *sk)
593 {
594 put_pid(sk->sk_peer_pid);
595 if (sk->sk_peer_cred)
596 put_cred(sk->sk_peer_cred);
597 sk->sk_peer_pid = get_pid(task_tgid(current));
598 sk->sk_peer_cred = get_current_cred();
599 }
600
601 static void copy_peercred(struct sock *sk, struct sock *peersk)
602 {
603 put_pid(sk->sk_peer_pid);
604 if (sk->sk_peer_cred)
605 put_cred(sk->sk_peer_cred);
606 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
607 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
608 }
609
610 static int unix_listen(struct socket *sock, int backlog)
611 {
612 int err;
613 struct sock *sk = sock->sk;
614 struct unix_sock *u = unix_sk(sk);
615 struct pid *old_pid = NULL;
616
617 err = -EOPNOTSUPP;
618 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
619 goto out; /* Only stream/seqpacket sockets accept */
620 err = -EINVAL;
621 if (!u->addr)
622 goto out; /* No listens on an unbound socket */
623 unix_state_lock(sk);
624 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
625 goto out_unlock;
626 if (backlog > sk->sk_max_ack_backlog)
627 wake_up_interruptible_all(&u->peer_wait);
628 sk->sk_max_ack_backlog = backlog;
629 sk->sk_state = TCP_LISTEN;
630 /* set credentials so connect can copy them */
631 init_peercred(sk);
632 err = 0;
633
634 out_unlock:
635 unix_state_unlock(sk);
636 put_pid(old_pid);
637 out:
638
639 return err;
640 }
641
642 static int unix_release(struct socket *);
643 static int unix_bind(struct socket *, struct sockaddr *, int);
644 static int unix_stream_connect(struct socket *, struct sockaddr *,
645 int addr_len, int flags);
646 static int unix_socketpair(struct socket *, struct socket *);
647 static int unix_accept(struct socket *, struct socket *, int);
648 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
649 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
650 static unsigned int unix_dgram_poll(struct file *, struct socket *,
651 poll_table *);
652 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
653 static int unix_shutdown(struct socket *, int);
654 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
655 struct msghdr *, size_t);
656 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
657 struct msghdr *, size_t, int);
658 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
659 struct msghdr *, size_t);
660 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
661 struct msghdr *, size_t, int);
662 static int unix_dgram_connect(struct socket *, struct sockaddr *,
663 int, int);
664 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
665 struct msghdr *, size_t);
666 static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
667 struct msghdr *, size_t, int);
668
669 static int unix_set_peek_off(struct sock *sk, int val)
670 {
671 struct unix_sock *u = unix_sk(sk);
672
673 if (mutex_lock_interruptible(&u->readlock))
674 return -EINTR;
675
676 sk->sk_peek_off = val;
677 mutex_unlock(&u->readlock);
678
679 return 0;
680 }
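
/* Userspace sketch (illustrative): once SO_PEEK_OFF is set, successive
 * MSG_PEEK reads walk forward through the queued data instead of
 * re-reading the head of the queue:
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 16..31
 */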
681
682
683 static const struct proto_ops unix_stream_ops = {
684 .family = PF_UNIX,
685 .owner = THIS_MODULE,
686 .release = unix_release,
687 .bind = unix_bind,
688 .connect = unix_stream_connect,
689 .socketpair = unix_socketpair,
690 .accept = unix_accept,
691 .getname = unix_getname,
692 .poll = unix_poll,
693 .ioctl = unix_ioctl,
694 .listen = unix_listen,
695 .shutdown = unix_shutdown,
696 .setsockopt = sock_no_setsockopt,
697 .getsockopt = sock_no_getsockopt,
698 .sendmsg = unix_stream_sendmsg,
699 .recvmsg = unix_stream_recvmsg,
700 .mmap = sock_no_mmap,
701 .sendpage = sock_no_sendpage,
702 .set_peek_off = unix_set_peek_off,
703 };
704
705 static const struct proto_ops unix_dgram_ops = {
706 .family = PF_UNIX,
707 .owner = THIS_MODULE,
708 .release = unix_release,
709 .bind = unix_bind,
710 .connect = unix_dgram_connect,
711 .socketpair = unix_socketpair,
712 .accept = sock_no_accept,
713 .getname = unix_getname,
714 .poll = unix_dgram_poll,
715 .ioctl = unix_ioctl,
716 .listen = sock_no_listen,
717 .shutdown = unix_shutdown,
718 .setsockopt = sock_no_setsockopt,
719 .getsockopt = sock_no_getsockopt,
720 .sendmsg = unix_dgram_sendmsg,
721 .recvmsg = unix_dgram_recvmsg,
722 .mmap = sock_no_mmap,
723 .sendpage = sock_no_sendpage,
724 .set_peek_off = unix_set_peek_off,
725 };
726
727 static const struct proto_ops unix_seqpacket_ops = {
728 .family = PF_UNIX,
729 .owner = THIS_MODULE,
730 .release = unix_release,
731 .bind = unix_bind,
732 .connect = unix_stream_connect,
733 .socketpair = unix_socketpair,
734 .accept = unix_accept,
735 .getname = unix_getname,
736 .poll = unix_dgram_poll,
737 .ioctl = unix_ioctl,
738 .listen = unix_listen,
739 .shutdown = unix_shutdown,
740 .setsockopt = sock_no_setsockopt,
741 .getsockopt = sock_no_getsockopt,
742 .sendmsg = unix_seqpacket_sendmsg,
743 .recvmsg = unix_seqpacket_recvmsg,
744 .mmap = sock_no_mmap,
745 .sendpage = sock_no_sendpage,
746 .set_peek_off = unix_set_peek_off,
747 };
748
749 static struct proto unix_proto = {
750 .name = "UNIX",
751 .owner = THIS_MODULE,
752 .obj_size = sizeof(struct unix_sock),
753 };
754
755 /*
756 * AF_UNIX sockets do not interact with hardware, hence they
757 * dont trigger interrupts - so it's safe for them to have
758 * bh-unsafe locking for their sk_receive_queue.lock. Split off
759 * this special lock-class by reinitializing the spinlock key:
760 */
761 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
762
763 static struct sock *unix_create1(struct net *net, struct socket *sock)
764 {
765 struct sock *sk = NULL;
766 struct unix_sock *u;
767
768 atomic_long_inc(&unix_nr_socks);
769 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
770 goto out;
771
772 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
773 if (!sk)
774 goto out;
775
776 sock_init_data(sock, sk);
777 lockdep_set_class(&sk->sk_receive_queue.lock,
778 &af_unix_sk_receive_queue_lock_key);
779
780 sk->sk_write_space = unix_write_space;
781 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
782 sk->sk_destruct = unix_sock_destructor;
783 u = unix_sk(sk);
784 u->path.dentry = NULL;
785 u->path.mnt = NULL;
786 spin_lock_init(&u->lock);
787 atomic_long_set(&u->inflight, 0);
788 INIT_LIST_HEAD(&u->link);
789 mutex_init(&u->readlock); /* single task reading lock */
790 init_waitqueue_head(&u->peer_wait);
791 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
792 unix_insert_socket(unix_sockets_unbound(sk), sk);
793 out:
794 if (sk == NULL)
795 atomic_long_dec(&unix_nr_socks);
796 else {
797 local_bh_disable();
798 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
799 local_bh_enable();
800 }
801 return sk;
802 }
803
804 static int unix_create(struct net *net, struct socket *sock, int protocol,
805 int kern)
806 {
807 if (protocol && protocol != PF_UNIX)
808 return -EPROTONOSUPPORT;
809
810 sock->state = SS_UNCONNECTED;
811
812 switch (sock->type) {
813 case SOCK_STREAM:
814 sock->ops = &unix_stream_ops;
815 break;
816 /*
817 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
818 * nothing uses it.
819 */
820 case SOCK_RAW:
821 sock->type = SOCK_DGRAM;
822 case SOCK_DGRAM:
823 sock->ops = &unix_dgram_ops;
824 break;
825 case SOCK_SEQPACKET:
826 sock->ops = &unix_seqpacket_ops;
827 break;
828 default:
829 return -ESOCKTNOSUPPORT;
830 }
831
832 return unix_create1(net, sock) ? 0 : -ENOMEM;
833 }
834
835 static int unix_release(struct socket *sock)
836 {
837 struct sock *sk = sock->sk;
838
839 if (!sk)
840 return 0;
841
842 unix_release_sock(sk, 0);
843 sock->sk = NULL;
844
845 return 0;
846 }
847
848 static int unix_autobind(struct socket *sock)
849 {
850 struct sock *sk = sock->sk;
851 struct net *net = sock_net(sk);
852 struct unix_sock *u = unix_sk(sk);
853 static u32 ordernum = 1;
854 struct unix_address *addr;
855 int err;
856 unsigned int retries = 0;
857
858 err = mutex_lock_interruptible(&u->readlock);
859 if (err)
860 return err;
861
862 err = 0;
863 if (u->addr)
864 goto out;
865
866 err = -ENOMEM;
867 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
868 if (!addr)
869 goto out;
870
871 addr->name->sun_family = AF_UNIX;
872 atomic_set(&addr->refcnt, 1);
873
874 retry:
875 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
876 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
877
878 spin_lock(&unix_table_lock);
879 ordernum = (ordernum+1)&0xFFFFF;
880
881 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
882 addr->hash)) {
883 spin_unlock(&unix_table_lock);
884 /*
885 * __unix_find_socket_byname() may take a long time if many names
886 * are already in use.
887 */
888 cond_resched();
889 /* Give up if all names seem to be in use. */
890 if (retries++ == 0xFFFFF) {
891 err = -ENOSPC;
892 kfree(addr);
893 goto out;
894 }
895 goto retry;
896 }
897 addr->hash ^= sk->sk_type;
898
899 __unix_remove_socket(sk);
900 u->addr = addr;
901 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
902 spin_unlock(&unix_table_lock);
903 err = 0;
904
905 out: mutex_unlock(&u->readlock);
906 return err;
907 }
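
/*
 * For illustration (name assumed): after autobind the socket holds an
 * abstract address of a leading zero byte followed by five hex digits,
 * e.g. "\0" "0004f", which is what getsockname() reports when connect()
 * or sendto() autobinds an unbound SOCK_PASSCRED socket.
 */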
908
909 static struct sock *unix_find_other(struct net *net,
910 struct sockaddr_un *sunname, int len,
911 int type, unsigned int hash, int *error)
912 {
913 struct sock *u;
914 struct path path;
915 int err = 0;
916
917 if (sunname->sun_path[0]) {
918 struct inode *inode;
919 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
920 if (err)
921 goto fail;
922 inode = path.dentry->d_inode;
923 err = inode_permission(inode, MAY_WRITE);
924 if (err)
925 goto put_fail;
926
927 err = -ECONNREFUSED;
928 if (!S_ISSOCK(inode->i_mode))
929 goto put_fail;
930 u = unix_find_socket_byinode(inode);
931 if (!u)
932 goto put_fail;
933
934 if (u->sk_type == type)
935 touch_atime(&path);
936
937 path_put(&path);
938
939 err = -EPROTOTYPE;
940 if (u->sk_type != type) {
941 sock_put(u);
942 goto fail;
943 }
944 } else {
945 err = -ECONNREFUSED;
946 u = unix_find_socket_byname(net, sunname, len, type, hash);
947 if (u) {
948 struct dentry *dentry;
949 dentry = unix_sk(u)->path.dentry;
950 if (dentry)
951 touch_atime(&unix_sk(u)->path);
952 } else
953 goto fail;
954 }
955 return u;
956
957 put_fail:
958 path_put(&path);
959 fail:
960 *error = err;
961 return NULL;
962 }
963
964 static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
965 {
966 struct dentry *dentry;
967 struct path path;
968 int err = 0;
969 /*
970 * Get the parent directory, calculate the hash for last
971 * component.
972 */
973 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
974 err = PTR_ERR(dentry);
975 if (IS_ERR(dentry))
976 return err;
977
978 /*
979 * All right, let's create it.
980 */
981 err = security_path_mknod(&path, dentry, mode, 0);
982 if (!err) {
983 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
984 if (!err) {
985 res->mnt = mntget(path.mnt);
986 res->dentry = dget(dentry);
987 }
988 }
989 done_path_create(&path, dentry);
990 return err;
991 }
992
993 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
994 {
995 struct sock *sk = sock->sk;
996 struct net *net = sock_net(sk);
997 struct unix_sock *u = unix_sk(sk);
998 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
999 char *sun_path = sunaddr->sun_path;
1000 int err;
1001 unsigned int hash;
1002 struct unix_address *addr;
1003 struct hlist_head *list;
1004
1005 err = -EINVAL;
1006 if (sunaddr->sun_family != AF_UNIX)
1007 goto out;
1008
1009 if (addr_len == sizeof(short)) {
1010 err = unix_autobind(sock);
1011 goto out;
1012 }
1013
1014 err = unix_mkname(sunaddr, addr_len, &hash);
1015 if (err < 0)
1016 goto out;
1017 addr_len = err;
1018
1019 err = mutex_lock_interruptible(&u->readlock);
1020 if (err)
1021 goto out;
1022
1023 err = -EINVAL;
1024 if (u->addr)
1025 goto out_up;
1026
1027 err = -ENOMEM;
1028 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1029 if (!addr)
1030 goto out_up;
1031
1032 memcpy(addr->name, sunaddr, addr_len);
1033 addr->len = addr_len;
1034 addr->hash = hash ^ sk->sk_type;
1035 atomic_set(&addr->refcnt, 1);
1036
1037 if (sun_path[0]) {
1038 struct path path;
1039
1040 umode_t mode = S_IFSOCK |
1041 (SOCK_INODE(sock)->i_mode & ~current_umask());
1042 err = unix_mknod(sun_path, mode, &path);
1043 if (err) {
1044 if (err == -EEXIST)
1045 err = -EADDRINUSE;
1046 unix_release_addr(addr);
1047 goto out_up;
1048 }
1049 addr->hash = UNIX_HASH_SIZE;
1050 hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
1051 spin_lock(&unix_table_lock);
1052 u->path = path;
1053 list = &unix_socket_table[hash];
1054 } else {
1055 spin_lock(&unix_table_lock);
1056 err = -EADDRINUSE;
1057 if (__unix_find_socket_byname(net, sunaddr, addr_len,
1058 sk->sk_type, hash)) {
1059 unix_release_addr(addr);
1060 goto out_unlock;
1061 }
1062
1063 list = &unix_socket_table[addr->hash];
1064 }
1065
1066 err = 0;
1067 __unix_remove_socket(sk);
1068 u->addr = addr;
1069 __unix_insert_socket(list, sk);
1070
1071 out_unlock:
1072 spin_unlock(&unix_table_lock);
1073 out_up:
1074 mutex_unlock(&u->readlock);
1075 out:
1076
1077 return err;
1078 }
1079
1080 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1081 {
1082 if (unlikely(sk1 == sk2) || !sk2) {
1083 unix_state_lock(sk1);
1084 return;
1085 }
1086 if (sk1 < sk2) {
1087 unix_state_lock(sk1);
1088 unix_state_lock_nested(sk2);
1089 } else {
1090 unix_state_lock(sk2);
1091 unix_state_lock_nested(sk1);
1092 }
1093 }
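
/* The two locks above are always taken in ascending order of socket
 * address, so concurrent double-locks of the same pair cannot deadlock;
 * a NULL or identical second socket degenerates to a single lock.
 */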
1094
1095 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1096 {
1097 if (unlikely(sk1 == sk2) || !sk2) {
1098 unix_state_unlock(sk1);
1099 return;
1100 }
1101 unix_state_unlock(sk1);
1102 unix_state_unlock(sk2);
1103 }
1104
1105 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1106 int alen, int flags)
1107 {
1108 struct sock *sk = sock->sk;
1109 struct net *net = sock_net(sk);
1110 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1111 struct sock *other;
1112 unsigned int hash;
1113 int err;
1114
1115 if (addr->sa_family != AF_UNSPEC) {
1116
1117 err = unix_mkname(sunaddr, alen, &hash);
1118 if (err < 0)
1119 goto out;
1120 alen = err;
1121
1122 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1123 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1124 goto out;
1125
1126 restart:
1127 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1128 if (!other)
1129 goto out;
1130
1131 unix_state_double_lock(sk, other);
1132
1133 /* Apparently VFS overslept socket death. Retry. */
1134 if (sock_flag(other, SOCK_DEAD)) {
1135 unix_state_double_unlock(sk, other);
1136 sock_put(other);
1137 goto restart;
1138 }
1139
1140 err = -EPERM;
1141 if (!unix_may_send(sk, other))
1142 goto out_unlock;
1143
1144 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1145 if (err)
1146 goto out_unlock;
1147
1148 } else {
1149 /*
1150 * 1003.1g breaking connected state with AF_UNSPEC
1151 */
1152 other = NULL;
1153 unix_state_double_lock(sk, other);
1154 }
1155
1156 /*
1157 * If it was connected, reconnect.
1158 */
1159 if (unix_peer(sk)) {
1160 struct sock *old_peer = unix_peer(sk);
1161 unix_peer(sk) = other;
1162 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1163
1164 unix_state_double_unlock(sk, other);
1165
1166 if (other != old_peer)
1167 unix_dgram_disconnected(sk, old_peer);
1168 sock_put(old_peer);
1169 } else {
1170 unix_peer(sk) = other;
1171 unix_state_double_unlock(sk, other);
1172 }
1173
1174 #ifdef CONFIG_MTK_NET_LOGGING
1175 	if ((other != NULL) && (other->sk_socket != NULL) && (SOCK_INODE(sock) != NULL) &&
1176 	    (sunaddr != NULL) && (SOCK_INODE(other->sk_socket) != NULL)) {
1177 		printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]: connect [%s] other[%lu]\n", SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
1178 	}
1179 #endif
1180
1181 return 0;
1182
1183 out_unlock:
1184 unix_state_double_unlock(sk, other);
1185 sock_put(other);
1186 out:
1187
1188 return err;
1189 }
1190
1191 static long unix_wait_for_peer(struct sock *other, long timeo)
1192 {
1193 struct unix_sock *u = unix_sk(other);
1194 int sched;
1195 DEFINE_WAIT(wait);
1196
1197 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1198
1199 sched = !sock_flag(other, SOCK_DEAD) &&
1200 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1201 unix_recvq_full(other);
1202
1203 unix_state_unlock(other);
1204
1205 if (sched)
1206 timeo = schedule_timeout(timeo);
1207
1208 finish_wait(&u->peer_wait, &wait);
1209 return timeo;
1210 }
1211
1212 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1213 int addr_len, int flags)
1214 {
1215 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1216 struct sock *sk = sock->sk;
1217 struct net *net = sock_net(sk);
1218 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1219 struct sock *newsk = NULL;
1220 struct sock *other = NULL;
1221 struct sk_buff *skb = NULL;
1222 unsigned int hash;
1223 int st;
1224 int err;
1225 long timeo;
1226
1227 err = unix_mkname(sunaddr, addr_len, &hash);
1228 if (err < 0)
1229 goto out;
1230 addr_len = err;
1231
1232 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1233 (err = unix_autobind(sock)) != 0)
1234 goto out;
1235
1236 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1237
1238 /* First of all allocate resources.
1239 If we will make it after state is locked,
1240 we will have to recheck all again in any case.
1241 */
1242
1243 err = -ENOMEM;
1244
1245 /* create new sock for complete connection */
1246 newsk = unix_create1(sock_net(sk), NULL);
1247 if (newsk == NULL)
1248 goto out;
1249
1250 /* Allocate skb for sending to listening sock */
1251 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1252 if (skb == NULL)
1253 goto out;
1254
1255 restart:
1256 /* Find listening sock. */
1257 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1258 if (!other)
1259 goto out;
1260
1261 /* Latch state of peer */
1262 unix_state_lock(other);
1263
1264 /* Apparently VFS overslept socket death. Retry. */
1265 if (sock_flag(other, SOCK_DEAD)) {
1266 unix_state_unlock(other);
1267 sock_put(other);
1268 goto restart;
1269 }
1270
1271 err = -ECONNREFUSED;
1272 if (other->sk_state != TCP_LISTEN)
1273 goto out_unlock;
1274 if (other->sk_shutdown & RCV_SHUTDOWN)
1275 goto out_unlock;
1276
1277 if (unix_recvq_full(other)) {
1278 err = -EAGAIN;
1279 if (!timeo)
1280 goto out_unlock;
1281
1282 timeo = unix_wait_for_peer(other, timeo);
1283
1284 err = sock_intr_errno(timeo);
1285 if (signal_pending(current))
1286 goto out;
1287 sock_put(other);
1288 goto restart;
1289 }
1290
1291 /* Latch our state.
1292
1293 It is tricky place. We need to grab our state lock and cannot
1294 drop lock on peer. It is dangerous because deadlock is
1295 possible. Connect to self case and simultaneous
1296 attempt to connect are eliminated by checking socket
1297 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1298 check this before attempt to grab lock.
1299
1300 Well, and we have to recheck the state after socket locked.
1301 */
1302 st = sk->sk_state;
1303
1304 switch (st) {
1305 case TCP_CLOSE:
1306 /* This is ok... continue with connect */
1307 break;
1308 case TCP_ESTABLISHED:
1309 /* Socket is already connected */
1310 err = -EISCONN;
1311 goto out_unlock;
1312 default:
1313 err = -EINVAL;
1314 goto out_unlock;
1315 }
1316
1317 unix_state_lock_nested(sk);
1318
1319 if (sk->sk_state != st) {
1320 unix_state_unlock(sk);
1321 unix_state_unlock(other);
1322 sock_put(other);
1323 goto restart;
1324 }
1325
1326 err = security_unix_stream_connect(sk, other, newsk);
1327 if (err) {
1328 unix_state_unlock(sk);
1329 goto out_unlock;
1330 }
1331
1332 /* The way is open! Fastly set all the necessary fields... */
1333
1334 sock_hold(sk);
1335 unix_peer(newsk) = sk;
1336 newsk->sk_state = TCP_ESTABLISHED;
1337 newsk->sk_type = sk->sk_type;
1338 init_peercred(newsk);
1339 newu = unix_sk(newsk);
1340 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1341 otheru = unix_sk(other);
1342
1343 /* copy address information from listening to new sock*/
1344 if (otheru->addr) {
1345 atomic_inc(&otheru->addr->refcnt);
1346 newu->addr = otheru->addr;
1347 }
1348 if (otheru->path.dentry) {
1349 path_get(&otheru->path);
1350 newu->path = otheru->path;
1351 }
1352
1353 /* Set credentials */
1354 copy_peercred(sk, other);
1355
1356 sock->state = SS_CONNECTED;
1357 sk->sk_state = TCP_ESTABLISHED;
1358 sock_hold(newsk);
1359
1360 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1361 unix_peer(sk) = newsk;
1362
1363 unix_state_unlock(sk);
1364
1365 /* take ten and send info to listening sock */
1366 spin_lock(&other->sk_receive_queue.lock);
1367 __skb_queue_tail(&other->sk_receive_queue, skb);
1368 spin_unlock(&other->sk_receive_queue.lock);
1369 unix_state_unlock(other);
1370
1371 #ifdef CONFIG_MTK_NET_LOGGING
1372 	if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) &&
1373 	    (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL)) {
1374 		printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu]: connect [%s] other[%lu]\n", SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
1375 	}
1376 #endif
1377
1378 other->sk_data_ready(other, 0);
1379 sock_put(other);
1380
1381 return 0;
1382
1383 out_unlock:
1384 if (other)
1385 unix_state_unlock(other);
1386
1387 out:
1388 kfree_skb(skb);
1389 if (newsk)
1390 unix_release_sock(newsk, 0);
1391 if (other)
1392 sock_put(other);
1393
1394 return err;
1395 }
1396
1397 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1398 {
1399 struct sock *ska = socka->sk, *skb = sockb->sk;
1400
1401 /* Join our sockets back to back */
1402 sock_hold(ska);
1403 sock_hold(skb);
1404 unix_peer(ska) = skb;
1405 unix_peer(skb) = ska;
1406 init_peercred(ska);
1407 init_peercred(skb);
1408
1409 if (ska->sk_type != SOCK_DGRAM) {
1410 ska->sk_state = TCP_ESTABLISHED;
1411 skb->sk_state = TCP_ESTABLISHED;
1412 socka->state = SS_CONNECTED;
1413 sockb->state = SS_CONNECTED;
1414 }
1415 return 0;
1416 }
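
/* Userspace sketch (illustrative) of the call that lands here:
 *
 *	int sv[2];
 *	char buf[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	if (fork() == 0) {
 *		close(sv[0]);
 *		write(sv[1], "hi", 2);	// child end
 *	} else {
 *		close(sv[1]);
 *		read(sv[0], buf, 2);	// parent end
 *	}
 */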
1417
1418 static void unix_sock_inherit_flags(const struct socket *old,
1419 struct socket *new)
1420 {
1421 if (test_bit(SOCK_PASSCRED, &old->flags))
1422 set_bit(SOCK_PASSCRED, &new->flags);
1423 if (test_bit(SOCK_PASSSEC, &old->flags))
1424 set_bit(SOCK_PASSSEC, &new->flags);
1425 }
1426
1427 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1428 {
1429 struct sock *sk = sock->sk;
1430 struct sock *tsk;
1431 struct sk_buff *skb;
1432 int err;
1433
1434 err = -EOPNOTSUPP;
1435 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1436 goto out;
1437
1438 err = -EINVAL;
1439 if (sk->sk_state != TCP_LISTEN)
1440 goto out;
1441
1442 /* If socket state is TCP_LISTEN it cannot change (for now...),
1443 * so that no locks are necessary.
1444 */
1445
1446 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1447 if (!skb) {
1448 /* This means receive shutdown. */
1449 if (err == 0)
1450 err = -EINVAL;
1451 goto out;
1452 }
1453
1454 tsk = skb->sk;
1455 skb_free_datagram(sk, skb);
1456 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1457
1458 /* attach accepted sock to socket */
1459 unix_state_lock(tsk);
1460 newsock->state = SS_CONNECTED;
1461 unix_sock_inherit_flags(sock, newsock);
1462 sock_graft(tsk, newsock);
1463 unix_state_unlock(tsk);
1464
1465 return 0;
1466
1467 out:
1468
1469 return err;
1470 }
1471
1472
1473 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1474 {
1475 struct sock *sk = sock->sk;
1476 struct unix_sock *u;
1477 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1478 int err = 0;
1479
1480 if (peer) {
1481 sk = unix_peer_get(sk);
1482
1483 err = -ENOTCONN;
1484 if (!sk)
1485 goto out;
1486 err = 0;
1487 } else {
1488 sock_hold(sk);
1489 }
1490
1491 u = unix_sk(sk);
1492 unix_state_lock(sk);
1493 if (!u->addr) {
1494 sunaddr->sun_family = AF_UNIX;
1495 sunaddr->sun_path[0] = 0;
1496 *uaddr_len = sizeof(short);
1497 } else {
1498 struct unix_address *addr = u->addr;
1499
1500 *uaddr_len = addr->len;
1501 memcpy(sunaddr, addr->name, *uaddr_len);
1502 }
1503 unix_state_unlock(sk);
1504 sock_put(sk);
1505 out:
1506 return err;
1507 }
1508
1509 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1510 {
1511 int i;
1512
1513 scm->fp = UNIXCB(skb).fp;
1514 UNIXCB(skb).fp = NULL;
1515
1516 for (i = scm->fp->count-1; i >= 0; i--)
1517 unix_notinflight(scm->fp->fp[i]);
1518 }
1519
1520 static void unix_destruct_scm(struct sk_buff *skb)
1521 {
1522 struct scm_cookie scm;
1523 memset(&scm, 0, sizeof(scm));
1524 scm.pid = UNIXCB(skb).pid;
1525 if (UNIXCB(skb).fp)
1526 unix_detach_fds(&scm, skb);
1527
1528 /* Alas, it calls VFS */
1529 /* So fscking what? fput() had been SMP-safe since the last Summer */
1530 scm_destroy(&scm);
1531 sock_wfree(skb);
1532 }
1533
1534 #define MAX_RECURSION_LEVEL 4
1535
1536 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1537 {
1538 int i;
1539 unsigned char max_level = 0;
1540 int unix_sock_count = 0;
1541
1542 for (i = scm->fp->count - 1; i >= 0; i--) {
1543 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1544
1545 if (sk) {
1546 unix_sock_count++;
1547 max_level = max(max_level,
1548 unix_sk(sk)->recursion_level);
1549 }
1550 }
1551 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1552 return -ETOOMANYREFS;
1553
1554 /*
1555 * Need to duplicate file references for the sake of garbage
1556 * collection. Otherwise a socket in the fps might become a
1557 * candidate for GC while the skb is not yet queued.
1558 */
1559 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1560 if (!UNIXCB(skb).fp)
1561 return -ENOMEM;
1562
1563 if (unix_sock_count) {
1564 for (i = scm->fp->count - 1; i >= 0; i--)
1565 unix_inflight(scm->fp->fp[i]);
1566 }
1567 return max_level;
1568 }
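
/* Userspace sketch (illustrative) of the sender whose descriptors end
 * up in unix_attach_fds(); fd_to_pass and sock are assumed to exist:
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
 *
 *	c->cmsg_level = SOL_SOCKET;
 *	c->cmsg_type  = SCM_RIGHTS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
 *	sendmsg(sock, &mh, 0);
 */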
1569
1570 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1571 {
1572 int err = 0;
1573
1574 UNIXCB(skb).pid = get_pid(scm->pid);
1575 UNIXCB(skb).uid = scm->creds.uid;
1576 UNIXCB(skb).gid = scm->creds.gid;
1577 UNIXCB(skb).fp = NULL;
1578 if (scm->fp && send_fds)
1579 err = unix_attach_fds(scm, skb);
1580
1581 skb->destructor = unix_destruct_scm;
1582 return err;
1583 }
1584
1585 /*
1586 * Some apps rely on write() giving SCM_CREDENTIALS
1587 * We include credentials if source or destination socket
1588 * asserted SOCK_PASSCRED.
1589 */
1590 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1591 const struct sock *other)
1592 {
1593 if (UNIXCB(skb).pid)
1594 return;
1595 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1596 !other->sk_socket ||
1597 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1598 UNIXCB(skb).pid = get_pid(task_tgid(current));
1599 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1600 }
1601 }
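
/* Userspace sketch (illustrative): a receiver that sets SO_PASSCRED and
 * thereby causes maybe_add_creds() to attach SCM_CREDENTIALS:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	// each recvmsg() now carries a struct ucred control message
 */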
1602
1603 /*
1604 * Send AF_UNIX data.
1605 */
1606
1607 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1608 struct msghdr *msg, size_t len)
1609 {
1610 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1611 struct sock *sk = sock->sk;
1612 struct net *net = sock_net(sk);
1613 struct unix_sock *u = unix_sk(sk);
1614 struct sockaddr_un *sunaddr = msg->msg_name;
1615 struct sock *other = NULL;
1616 int namelen = 0; /* fake GCC */
1617 int err;
1618 unsigned int hash;
1619 struct sk_buff *skb;
1620 long timeo;
1621 struct scm_cookie tmp_scm;
1622 int max_level;
1623 int data_len = 0;
1624 int sk_locked;
1629
1630 if (NULL == siocb->scm)
1631 siocb->scm = &tmp_scm;
1632 wait_for_unix_gc();
1633 err = scm_send(sock, msg, siocb->scm, false);
1634 if (err < 0)
1635 return err;
1636
1637 err = -EOPNOTSUPP;
1638 if (msg->msg_flags&MSG_OOB)
1639 goto out;
1640
1641 if (msg->msg_namelen) {
1642 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1643 if (err < 0)
1644 goto out;
1645 namelen = err;
1646 } else {
1647 sunaddr = NULL;
1648 err = -ENOTCONN;
1649 other = unix_peer_get(sk);
1650 if (!other)
1651 goto out;
1652 }
1653
1654 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1655 && (err = unix_autobind(sock)) != 0)
1656 goto out;
1657
1658 err = -EMSGSIZE;
1659 if (len > sk->sk_sndbuf - 32)
1660 goto out;
1661
1662 if (len > SKB_MAX_ALLOC)
1663 data_len = min_t(size_t,
1664 len - SKB_MAX_ALLOC,
1665 MAX_SKB_FRAGS * PAGE_SIZE);
1666
1667 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1668 msg->msg_flags & MSG_DONTWAIT, &err);
1669 if (skb == NULL)
1670 goto out;
1671
1672 err = unix_scm_to_skb(siocb->scm, skb, true);
1673 if (err < 0)
1674 goto out_free;
1675 max_level = err + 1;
1676 unix_get_secdata(siocb->scm, skb);
1677
1678 skb_put(skb, len - data_len);
1679 skb->data_len = data_len;
1680 skb->len = len;
1681 err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
1682 if (err)
1683 goto out_free;
1684
1685 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1686
1687 restart:
1688 if (!other) {
1689 err = -ECONNRESET;
1690 if (sunaddr == NULL)
1691 goto out_free;
1692
1693 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1694 hash, &err);
1695 if (other == NULL)
1696 goto out_free;
1697 }
1698
1699 if (sk_filter(other, skb) < 0) {
1700 /* Toss the packet but do not return any error to the sender */
1701 err = len;
1702 goto out_free;
1703 }
1704
1705 sk_locked = 0;
1706 unix_state_lock(other);
1707 restart_locked:
1708 err = -EPERM;
1709 if (!unix_may_send(sk, other))
1710 goto out_unlock;
1711
1712 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1713 /*
1714 * Check with 1003.1g - what should
1715 * datagram error
1716 */
1717 unix_state_unlock(other);
1718 sock_put(other);
1719
1720 if (!sk_locked)
1725 unix_state_lock(sk);
1726
1727 err = 0;
1728 if (unix_peer(sk) == other) {
1729 unix_peer(sk) = NULL;
1730 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1731
1732 unix_state_unlock(sk);
1733
1734 unix_dgram_disconnected(sk, other);
1735 sock_put(other);
1736 err = -ECONNREFUSED;
1737 } else {
1738 unix_state_unlock(sk);
1739 }
1740
1741 other = NULL;
1742 if (err)
1743 goto out_free;
1744 goto restart;
1745 }
1746
1747 err = -EPIPE;
1748 if (other->sk_shutdown & RCV_SHUTDOWN)
1749 goto out_unlock;
1750
1751 if (sk->sk_type != SOCK_SEQPACKET) {
1752 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1753 if (err)
1754 goto out_unlock;
1755 }
1756
1761 /* other == sk && unix_peer(other) != sk if
1762  * - unix_peer(sk) == NULL, destination address bound to sk
1763  * - unix_peer(sk) == sk by time of get but disconnected before lock
1764  */
1765 if (other != sk &&
1766     unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1767 if (timeo) {
1768 timeo = unix_wait_for_peer(other, timeo);
1769
1770 err = sock_intr_errno(timeo);
1771 if (signal_pending(current))
1772 goto out_free;
1773
1774 goto restart;
1775 }
1776
1777 if (!sk_locked) {
1778 unix_state_unlock(other);
1779 unix_state_double_lock(sk, other);
1780 }
1781
1782 if (unix_peer(sk) != other ||
1783 unix_dgram_peer_wake_me(sk, other)) {
1784 err = -EAGAIN;
1785 sk_locked = 1;
1786 goto out_unlock;
1787 }
1788
1789 if (!sk_locked) {
1790 sk_locked = 1;
1791 goto restart_locked;
1792 }
1793 }
1794
1795 if (unlikely(sk_locked))
1796 unix_state_unlock(sk);
1797
1798 if (sock_flag(other, SOCK_RCVTSTAMP))
1799 __net_timestamp(skb);
1800 maybe_add_creds(skb, sock, other);
1801 skb_queue_tail(&other->sk_receive_queue, skb);
1802 if (max_level > unix_sk(other)->recursion_level)
1803 unix_sk(other)->recursion_level = max_level;
1804 unix_state_unlock(other);
1805 other->sk_data_ready(other, len);
1806 sock_put(other);
1807 scm_destroy(siocb->scm);
1808
1809 return len;
1810
1811 out_unlock:
1812 if (sk_locked)
1813 unix_state_unlock(sk);
1814 unix_state_unlock(other);
1815 out_free:
1816 kfree_skb(skb);
1817 out:
1818 if (other)
1819 sock_put(other);
1820 scm_destroy(siocb->scm);
1821
1822 return err;
1823 }
1824
1825
1826 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1827 struct msghdr *msg, size_t len)
1828 {
1829 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1830 struct sock *sk = sock->sk;
1831 struct sock *other = NULL;
1832 int err, size;
1833 struct sk_buff *skb;
1834 int sent = 0;
1835 struct scm_cookie tmp_scm;
1836 bool fds_sent = false;
1837 int max_level;
1838
1839 if (NULL == siocb->scm)
1840 siocb->scm = &tmp_scm;
1841
1842 wait_for_unix_gc();
1843 err = scm_send(sock, msg, siocb->scm, false);
1844 if (err < 0)
1845 return err;
1846
1847 err = -EOPNOTSUPP;
1848 if (msg->msg_flags&MSG_OOB)
1849 goto out_err;
1850
1851 if (msg->msg_namelen) {
1852 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1853 goto out_err;
1854 } else {
1855 err = -ENOTCONN;
1856 other = unix_peer(sk);
1857 if (!other)
1858 goto out_err;
1859 }
1860
1861 if (sk->sk_shutdown & SEND_SHUTDOWN)
1862 goto pipe_err;
1863
1864 while (sent < len) {
1865 /*
1866 * Optimisation for the fact that under 0.01% of X
1867 * messages typically need breaking up.
1868 */
1869
1870 size = len-sent;
1871
1872 /* Keep two messages in the pipe so it schedules better */
1873 if (size > ((sk->sk_sndbuf >> 1) - 64))
1874 size = (sk->sk_sndbuf >> 1) - 64;
1875
1876 if (size > SKB_MAX_ALLOC)
1877 size = SKB_MAX_ALLOC;
1878
1879 /*
1880 * Grab a buffer
1881 */
1882
1883 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1884 &err);
1885
1886
1887 if (skb == NULL)
1888 goto out_err;
1889
1890 /*
1891 * If you pass two values to the sock_alloc_send_skb
1892 * it tries to grab the large buffer with GFP_NOFS
1893 * (which can fail easily), and if it fails grab the
1894 * fallback size buffer which is under a page and will
1895 * succeed. [Alan]
1896 */
1897 size = min_t(int, size, skb_tailroom(skb));
1898
1899
1900 /* Only send the fds in the first buffer */
1901 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1902 if (err < 0) {
1903 kfree_skb(skb);
1904 goto out_err;
1905 }
1906 max_level = err + 1;
1907 fds_sent = true;
1908
1909 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1910 if (err) {
1911 kfree_skb(skb);
1912 goto out_err;
1913 }
1914
1915 unix_state_lock(other);
1916
1917 if (sock_flag(other, SOCK_DEAD) ||
1918     (other->sk_shutdown & RCV_SHUTDOWN)) {
1919 #ifdef CONFIG_MTK_NET_LOGGING
1920 	if (other->sk_socket) {
1921 		if (sk->sk_socket)
1922 			printk(KERN_INFO "[mtk_net][unix]: sendmsg[%lu:%lu]:peer close\n",
1923 			       SOCK_INODE(sk->sk_socket)->i_ino,
1924 			       SOCK_INODE(other->sk_socket)->i_ino);
1925 		else
1926 			printk(KERN_INFO "[mtk_net][unix]: sendmsg[null:%lu]:peer close\n",
1927 			       SOCK_INODE(other->sk_socket)->i_ino);
1928 	} else {
1929 		printk(KERN_INFO "[mtk_net][unix]: sendmsg:peer close\n");
1930 	}
1931 #endif
1932 	goto pipe_err_free;
1933 }
1946
1947 maybe_add_creds(skb, sock, other);
1948 skb_queue_tail(&other->sk_receive_queue, skb);
1949 if (max_level > unix_sk(other)->recursion_level)
1950 unix_sk(other)->recursion_level = max_level;
1951 unix_state_unlock(other);
1952 other->sk_data_ready(other, size);
1953 sent += size;
1954 }
1955
1956 scm_destroy(siocb->scm);
1957 siocb->scm = NULL;
1958
1959 return sent;
1960
1961 pipe_err_free:
1962 unix_state_unlock(other);
1963 kfree_skb(skb);
1964 pipe_err:
1965 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1966 send_sig(SIGPIPE, current, 0);
1967 err = -EPIPE;
1968 out_err:
1969 scm_destroy(siocb->scm);
1970 siocb->scm = NULL;
1971
1972 return sent ? : err;
1973 }
1974
1975 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1976 struct msghdr *msg, size_t len)
1977 {
1978 int err;
1979 struct sock *sk = sock->sk;
1980
1981 err = sock_error(sk);
1982 if (err)
1983 return err;
1984
1985 if (sk->sk_state != TCP_ESTABLISHED)
1986 return -ENOTCONN;
1987
1988 if (msg->msg_namelen)
1989 msg->msg_namelen = 0;
1990
1991 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1992 }
1993
1994 static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
1995 struct msghdr *msg, size_t size,
1996 int flags)
1997 {
1998 struct sock *sk = sock->sk;
1999
2000 if (sk->sk_state != TCP_ESTABLISHED)
2001 return -ENOTCONN;
2002
2003 return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
2004 }
2005
2006 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2007 {
2008 struct unix_sock *u = unix_sk(sk);
2009
2010 if (u->addr) {
2011 msg->msg_namelen = u->addr->len;
2012 memcpy(msg->msg_name, u->addr->name, u->addr->len);
2013 }
2014 }
2015
2016 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
2017 struct msghdr *msg, size_t size,
2018 int flags)
2019 {
2020 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
2021 struct scm_cookie tmp_scm;
2022 struct sock *sk = sock->sk;
2023 struct unix_sock *u = unix_sk(sk);
2024 int noblock = flags & MSG_DONTWAIT;
2025 struct sk_buff *skb;
2026 int err;
2027 int peeked, skip;
2028
2029 err = -EOPNOTSUPP;
2030 if (flags&MSG_OOB)
2031 goto out;
2032
2033 mutex_lock(&u->readlock);
2034
2035 skip = sk_peek_offset(sk, flags);
2036
2037 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
2038 if (!skb) {
2039 unix_state_lock(sk);
2040 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2041 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2042 (sk->sk_shutdown & RCV_SHUTDOWN))
2043 err = 0;
2044 unix_state_unlock(sk);
2045 goto out_unlock;
2046 }
2047
2048 wake_up_interruptible_sync_poll(&u->peer_wait,
2049 POLLOUT | POLLWRNORM | POLLWRBAND);
2050
2051 if (msg->msg_name)
2052 unix_copy_addr(msg, skb->sk);
2053
2054 if (size > skb->len - skip)
2055 size = skb->len - skip;
2056 else if (size < skb->len - skip)
2057 msg->msg_flags |= MSG_TRUNC;
2058
2059 err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
2060 if (err)
2061 goto out_free;
2062
2063 if (sock_flag(sk, SOCK_RCVTSTAMP))
2064 __sock_recv_timestamp(msg, sk, skb);
2065
2066 if (!siocb->scm) {
2067 siocb->scm = &tmp_scm;
2068 memset(&tmp_scm, 0, sizeof(tmp_scm));
2069 }
2070 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2071 unix_set_secdata(siocb->scm, skb);
2072
2073 if (!(flags & MSG_PEEK)) {
2074 if (UNIXCB(skb).fp)
2075 unix_detach_fds(siocb->scm, skb);
2076
2077 sk_peek_offset_bwd(sk, skb->len);
2078 } else {
		/* It is questionable what to do with fds on PEEK. We could:
		 *   - not return fds - good, but too simple 8)
		 *   - return fds, and not return them on read (old strategy,
		 *     apparently wrong)
		 *   - clone fds (I chose it for now, it is the most universal
		 *     solution)
		 *
		 * POSIX 1003.1g does not actually define this clearly
		 * at all. POSIX 1003.1g doesn't define a lot of things
		 * clearly however!
		 */
2091
2092 sk_peek_offset_fwd(sk, size);
2093
2094 if (UNIXCB(skb).fp)
2095 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2096 }
2097 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2098
2099 scm_recv(sock, msg, siocb->scm, flags);
2100
2101 out_free:
2102 skb_free_datagram(sk, skb);
2103 out_unlock:
2104 mutex_unlock(&u->readlock);
2105 out:
2106
2107 return err;
2108 }
2109
/*
 * Sleep until more data has arrived. But check for races: stop waiting
 * as soon as the queue tail changes, an error or shutdown is signalled,
 * a signal is pending, or the timeout expires.
 */
2113 static long unix_stream_data_wait(struct sock *sk, long timeo,
2114 struct sk_buff *last)
2115 {
2116 DEFINE_WAIT(wait);
2117
2118 unix_state_lock(sk);
2119
2120 for (;;) {
2121 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2122
2123 if (skb_peek_tail(&sk->sk_receive_queue) != last ||
2124 sk->sk_err ||
2125 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2126 signal_pending(current) ||
2127 !timeo)
2128 break;
2129
2130 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2131 unix_state_unlock(sk);
2132 timeo = freezable_schedule_timeout(timeo);
2133 unix_state_lock(sk);
2134
2135 if (sock_flag(sk, SOCK_DEAD))
2136 break;
2137
2138 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2139 }
2140
2141 finish_wait(sk_sleep(sk), &wait);
2142 unix_state_unlock(sk);
2143 return timeo;
2144 }
2145
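/* Stream receive: copy bytes out of queued skbs until the request is
 * satisfied or the rcvlowat target is met, sleeping in
 * unix_stream_data_wait() when the queue runs dry. SCM data (fds,
 * credentials) attached to an skb is handed over as it is consumed,
 * and messages from different writers are never merged.
 */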
2146 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
2147 struct msghdr *msg, size_t size,
2148 int flags)
2149 {
2150 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
2151 struct scm_cookie tmp_scm;
2152 struct sock *sk = sock->sk;
2153 struct unix_sock *u = unix_sk(sk);
2154 struct sockaddr_un *sunaddr = msg->msg_name;
2155 int copied = 0;
2156 int noblock = flags & MSG_DONTWAIT;
2157 int check_creds = 0;
2158 int target;
2159 int err = 0;
2160 long timeo;
2161 int skip;
	struct sock *other = unix_peer(sk);
2163
2164 err = -EINVAL;
2165 if (sk->sk_state != TCP_ESTABLISHED)
2166 goto out;
2167
2168 err = -EOPNOTSUPP;
2169 if (flags&MSG_OOB)
2170 goto out;
2171
2172 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
2173 timeo = sock_rcvtimeo(sk, noblock);
2174
	/* Lock the socket to prevent the queue from being reordered
	 * while we sleep in memcpy_toiovec().
	 */
2178
2179 if (!siocb->scm) {
2180 siocb->scm = &tmp_scm;
2181 memset(&tmp_scm, 0, sizeof(tmp_scm));
2182 }
2183
2184 err = mutex_lock_interruptible(&u->readlock);
2185 if (unlikely(err)) {
		/* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible().
		 */
2189 err = noblock ? -EAGAIN : -ERESTARTSYS;
2190 goto out;
2191 }
2192
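	/* Main copy loop: peek the head of the receive queue, copy out as
	 * much as fits, and only unlink an skb once it has been fully
	 * consumed (partial reads just skb_pull() the head in place).
	 */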
2193 do {
2194 int chunk;
2195 struct sk_buff *skb, *last;
2196
2197 unix_state_lock(sk);
2198 if (sock_flag(sk, SOCK_DEAD)) {
2199 err = -ECONNRESET;
2200 goto unlock;
2201 }
2202 last = skb = skb_peek(&sk->sk_receive_queue);
2203 again:
2204 if (skb == NULL) {
2205 unix_sk(sk)->recursion_level = 0;
2206 if (copied >= target)
2207 goto unlock;
2208
2209 /*
2210 * POSIX 1003.1g mandates this order.
2211 */
2212
2213 err = sock_error(sk);
2214 if (err)
2215 goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
#ifdef CONFIG_MTK_NET_LOGGING
				if (sk->sk_socket) {
					if (other && other->sk_socket)
						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]: exit read due to peer shutdown\n",
						       SOCK_INODE(sk->sk_socket)->i_ino,
						       SOCK_INODE(other->sk_socket)->i_ino);
					else
						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]: exit read due to peer shutdown\n",
						       SOCK_INODE(sk->sk_socket)->i_ino);
				} else {
					printk(KERN_INFO "[mtk_net][unix]: recvmsg: exit read due to peer shutdown\n");
				}
#endif
				goto unlock;
			}
2238 unix_state_unlock(sk);
2239 err = -EAGAIN;
2240 if (!timeo)
2241 break;
2242 mutex_unlock(&u->readlock);
2243
2244 timeo = unix_stream_data_wait(sk, timeo, last);
			if (!timeo) {
#ifdef CONFIG_MTK_NET_LOGGING
				if (sk->sk_socket) {
					if (other && other->sk_socket)
						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]: exit read due to timeout\n",
						       SOCK_INODE(sk->sk_socket)->i_ino,
						       SOCK_INODE(other->sk_socket)->i_ino);
					else
						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]: exit read due to timeout\n",
						       SOCK_INODE(sk->sk_socket)->i_ino);
				} else {
					printk(KERN_INFO "[mtk_net][unix]: recvmsg: exit read due to timeout\n");
				}
#endif
			}
2267
2268 if (signal_pending(current)) {
2269 err = sock_intr_errno(timeo);
2270 goto out;
2271 }
2272
2273 mutex_lock(&u->readlock);
2274 continue;
2275 unlock:
2276 unix_state_unlock(sk);
2277 break;
2278 }
2279
2280 skip = sk_peek_offset(sk, flags);
2281 while (skip >= skb->len) {
2282 skip -= skb->len;
2283 last = skb;
2284 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2285 if (!skb)
2286 goto again;
2287 }
2288
2289 unix_state_unlock(sk);
2290
2291 if (check_creds) {
2292 /* Never glue messages from different writers */
2293 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
2294 !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
2295 !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
2296 break;
2297 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2298 /* Copy credentials */
2299 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2300 check_creds = 1;
2301 }
2302
2303 /* Copy address just once */
2304 if (sunaddr) {
2305 unix_copy_addr(msg, skb->sk);
2306 sunaddr = NULL;
2307 }
2308
2309 chunk = min_t(unsigned int, skb->len - skip, size);
2310 if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
2311 if (copied == 0)
2312 copied = -EFAULT;
2313 break;
2314 }
2315 copied += chunk;
2316 size -= chunk;
2317
2318 /* Mark read part of skb as used */
2319 if (!(flags & MSG_PEEK)) {
2320 skb_pull(skb, chunk);
2321
2322 sk_peek_offset_bwd(sk, chunk);
2323
2324 if (UNIXCB(skb).fp)
2325 unix_detach_fds(siocb->scm, skb);
2326
2327 if (skb->len)
2328 break;
2329
2330 skb_unlink(skb, &sk->sk_receive_queue);
2331 consume_skb(skb);
2332
2333 if (siocb->scm->fp)
2334 break;
2335 } else {
			/* It is questionable, see note in unix_dgram_recvmsg. */
2338 if (UNIXCB(skb).fp)
2339 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2340
2341 if (skip) {
2342 sk_peek_offset_fwd(sk, chunk);
2343 skip -= chunk;
2344 }
2345
2346 if (UNIXCB(skb).fp)
2347 break;
2348
2349 last = skb;
2350 unix_state_lock(sk);
2351 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2352 if (skb)
2353 goto again;
2354 unix_state_unlock(sk);
2355 break;
2356 }
2357 } while (size);
2358
2359 mutex_unlock(&u->readlock);
2360 scm_recv(sock, msg, siocb->scm, flags);
2361 out:
2362
2363 return copied ? : err;
2364 }
2365
2366 static int unix_shutdown(struct socket *sock, int mode)
2367 {
2368 struct sock *sk = sock->sk;
2369 struct sock *other;
2370
2371 if (mode < SHUT_RD || mode > SHUT_RDWR)
2372 return -EINVAL;
2373 /* This maps:
2374 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2375 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2376 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2377 */
2378 ++mode;
2379
2380 unix_state_lock(sk);
2381 sk->sk_shutdown |= mode;
2382 other = unix_peer(sk);
2383 if (other)
2384 sock_hold(other);
2385 unix_state_unlock(sk);
2386 sk->sk_state_change(sk);
2387
2388 if (other &&
2389 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2390
2391 int peer_mode = 0;
2392
2393 if (mode&RCV_SHUTDOWN)
2394 peer_mode |= SEND_SHUTDOWN;
2395 if (mode&SEND_SHUTDOWN)
2396 peer_mode |= RCV_SHUTDOWN;
2397 unix_state_lock(other);
2398 other->sk_shutdown |= peer_mode;
2399 unix_state_unlock(other);
2400 other->sk_state_change(other);
2401 if (peer_mode == SHUTDOWN_MASK)
2402 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2403 else if (peer_mode & RCV_SHUTDOWN)
2404 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2405 }
2406 if (other)
2407 sock_put(other);
2408
2409 return 0;
2410 }
2411
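/* Number of bytes readable on the socket, as reported by SIOCINQ:
 * for stream and seqpacket sockets the total of all queued skbs,
 * for datagram sockets the size of the packet at the head of the queue.
 */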
2412 long unix_inq_len(struct sock *sk)
2413 {
2414 struct sk_buff *skb;
2415 long amount = 0;
2416
2417 if (sk->sk_state == TCP_LISTEN)
2418 return -EINVAL;
2419
2420 spin_lock(&sk->sk_receive_queue.lock);
2421 if (sk->sk_type == SOCK_STREAM ||
2422 sk->sk_type == SOCK_SEQPACKET) {
2423 skb_queue_walk(&sk->sk_receive_queue, skb)
2424 amount += skb->len;
2425 } else {
2426 skb = skb_peek(&sk->sk_receive_queue);
2427 if (skb)
2428 amount = skb->len;
2429 }
2430 spin_unlock(&sk->sk_receive_queue.lock);
2431
2432 return amount;
2433 }
2434 EXPORT_SYMBOL_GPL(unix_inq_len);
2435
2436 long unix_outq_len(struct sock *sk)
2437 {
2438 return sk_wmem_alloc_get(sk);
2439 }
2440 EXPORT_SYMBOL_GPL(unix_outq_len);
2441
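/* Only two ioctls are handled here; everything else falls through to
 * -ENOIOCTLCMD. From userspace the queue sizes can be sampled roughly
 * like this (illustrative sketch only, not part of this file):
 *
 *	int in_q, out_q;
 *	if (ioctl(fd, SIOCINQ, &in_q) == 0 &&
 *	    ioctl(fd, SIOCOUTQ, &out_q) == 0)
 *		printf("readable=%d unsent=%d\n", in_q, out_q);
 */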
2442 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2443 {
2444 struct sock *sk = sock->sk;
2445 long amount = 0;
2446 int err;
2447
2448 switch (cmd) {
2449 case SIOCOUTQ:
2450 amount = unix_outq_len(sk);
2451 err = put_user(amount, (int __user *)arg);
2452 break;
2453 case SIOCINQ:
2454 amount = unix_inq_len(sk);
2455 if (amount < 0)
2456 err = amount;
2457 else
2458 err = put_user(amount, (int __user *)arg);
2459 break;
2460 default:
2461 err = -ENOIOCTLCMD;
2462 break;
2463 }
2464 return err;
2465 }
2466
2467 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2468 {
2469 struct sock *sk = sock->sk;
2470 unsigned int mask;
2471
2472 sock_poll_wait(file, sk_sleep(sk), wait);
2473 mask = 0;
2474
2475 /* exceptional events? */
2476 if (sk->sk_err)
2477 mask |= POLLERR;
2478 if (sk->sk_shutdown == SHUTDOWN_MASK)
2479 mask |= POLLHUP;
2480 if (sk->sk_shutdown & RCV_SHUTDOWN)
2481 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2482
2483 /* readable? */
2484 if (!skb_queue_empty(&sk->sk_receive_queue))
2485 mask |= POLLIN | POLLRDNORM;
2486
2487 /* Connection-based need to check for termination and startup */
2488 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2489 sk->sk_state == TCP_CLOSE)
2490 mask |= POLLHUP;
2491
	/*
	 * We set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
2496 if (unix_writable(sk))
2497 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2498
2499 return mask;
2500 }
2501
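/* Datagram poll differs from unix_poll in its writability check: a
 * connected sender is writable only while there is room in the peer's
 * receive queue, so when the peer is full we register on its wait queue
 * (unix_dgram_peer_wake_me) to be woken once it drains.
 */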
2502 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2503 poll_table *wait)
2504 {
2505 struct sock *sk = sock->sk, *other;
2506 unsigned int mask, writable;
2507
2508 sock_poll_wait(file, sk_sleep(sk), wait);
2509 mask = 0;
2510
2511 /* exceptional events? */
2512 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2513 mask |= POLLERR |
2514 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2515
2516 if (sk->sk_shutdown & RCV_SHUTDOWN)
2517 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2518 if (sk->sk_shutdown == SHUTDOWN_MASK)
2519 mask |= POLLHUP;
2520
2521 /* readable? */
2522 if (!skb_queue_empty(&sk->sk_receive_queue))
2523 mask |= POLLIN | POLLRDNORM;
2524
2525 /* Connection-based need to check for termination and startup */
2526 if (sk->sk_type == SOCK_SEQPACKET) {
2527 if (sk->sk_state == TCP_CLOSE)
2528 mask |= POLLHUP;
2529 /* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
2535 }
2536
2537 /* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND | POLLWRNORM | POLLOUT)))
		return mask;
2542
2543 writable = unix_writable(sk);
2544 if (writable) {
2545 unix_state_lock(sk);
2546
2547 other = unix_peer(sk);
2548 if (other && unix_peer(other) != sk &&
2549 unix_recvq_full(other) &&
2550 unix_dgram_peer_wake_me(sk, other))
2551 writable = 0;
2552
2553 unix_state_unlock(sk);
2554 }
2555
2556 if (writable)
2557 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2558 else
2559 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2560
2561 return mask;
2562 }
2563
2564 #ifdef CONFIG_PROC_FS
2565
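/* The iterator position (*pos) packs a hash-bucket index into the high
 * bits and a 1-based offset within that bucket into the low BUCKET_SPACE
 * bits; offset 0 is reserved so that *pos == 0 can mean SEQ_START_TOKEN.
 */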
2566 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2567
2568 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2569 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2570 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
2571
2572 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2573 {
2574 unsigned long offset = get_offset(*pos);
2575 unsigned long bucket = get_bucket(*pos);
2576 struct sock *sk;
2577 unsigned long count = 0;
2578
2579 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2580 if (sock_net(sk) != seq_file_net(seq))
2581 continue;
2582 if (++count == offset)
2583 break;
2584 }
2585
2586 return sk;
2587 }
2588
2589 static struct sock *unix_next_socket(struct seq_file *seq,
2590 struct sock *sk,
2591 loff_t *pos)
2592 {
2593 unsigned long bucket;
2594
2595 while (sk > (struct sock *)SEQ_START_TOKEN) {
2596 sk = sk_next(sk);
2597 if (!sk)
2598 goto next_bucket;
2599 if (sock_net(sk) == seq_file_net(seq))
2600 return sk;
2601 }
2602
2603 do {
2604 sk = unix_from_bucket(seq, pos);
2605 if (sk)
2606 return sk;
2607
2608 next_bucket:
2609 bucket = get_bucket(*pos) + 1;
2610 *pos = set_bucket_offset(bucket, 1);
2611 } while (bucket < ARRAY_SIZE(unix_socket_table));
2612
2613 return NULL;
2614 }
2615
2616 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2617 __acquires(unix_table_lock)
2618 {
2619 spin_lock(&unix_table_lock);
2620
2621 if (!*pos)
2622 return SEQ_START_TOKEN;
2623
2624 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2625 return NULL;
2626
2627 return unix_next_socket(seq, NULL, pos);
2628 }
2629
2630 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2631 {
2632 ++*pos;
2633 return unix_next_socket(seq, v, pos);
2634 }
2635
2636 static void unix_seq_stop(struct seq_file *seq, void *v)
2637 __releases(unix_table_lock)
2638 {
2639 spin_unlock(&unix_table_lock);
2640 }
2641
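/* One line per socket in /proc/net/unix, matching the header printed
 * for the start token: address, refcount, protocol (always 0), flags,
 * type, state, inode and, when bound, the path - with a leading '@'
 * standing in for the NUL byte that starts an abstract name.
 */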
2642 static int unix_seq_show(struct seq_file *seq, void *v)
2643 {
2644
2645 if (v == SEQ_START_TOKEN)
2646 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2647 "Inode Path\n");
2648 else {
2649 struct sock *s = v;
2650 struct unix_sock *u = unix_sk(s);
2651 unix_state_lock(s);
2652
2653 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2654 s,
2655 atomic_read(&s->sk_refcnt),
2656 0,
2657 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2658 s->sk_type,
2659 s->sk_socket ?
2660 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2661 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2662 sock_i_ino(s));
2663
2664 if (u->addr) {
2665 int i, len;
2666 seq_putc(seq, ' ');
2667
2668 i = 0;
2669 len = u->addr->len - sizeof(short);
2670 if (!UNIX_ABSTRACT(s))
2671 len--;
2672 else {
2673 seq_putc(seq, '@');
2674 i++;
2675 }
2676 for ( ; i < len; i++)
2677 seq_putc(seq, u->addr->name->sun_path[i]);
2678 }
2679 unix_state_unlock(s);
2680 seq_putc(seq, '\n');
2681 }
2682
2683 return 0;
2684 }
2685
2686 static const struct seq_operations unix_seq_ops = {
2687 .start = unix_seq_start,
2688 .next = unix_seq_next,
2689 .stop = unix_seq_stop,
2690 .show = unix_seq_show,
2691 };
2692
2693 static int unix_seq_open(struct inode *inode, struct file *file)
2694 {
2695 return seq_open_net(inode, file, &unix_seq_ops,
2696 sizeof(struct seq_net_private));
2697 }
2698
2699 static const struct file_operations unix_seq_fops = {
2700 .owner = THIS_MODULE,
2701 .open = unix_seq_open,
2702 .read = seq_read,
2703 .llseek = seq_lseek,
2704 .release = seq_release_net,
2705 };
2706
2707 #endif
2708
2709 static const struct net_proto_family unix_family_ops = {
2710 .family = PF_UNIX,
2711 .create = unix_create,
2712 .owner = THIS_MODULE,
2713 };
2714
2715
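/* Per-namespace setup: each netns gets its own sysctl table and its own
 * /proc/net/unix listing, plus a default datagram backlog of 10.
 */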
2716 static int __net_init unix_net_init(struct net *net)
2717 {
2718 int error = -ENOMEM;
2719
2720 net->unx.sysctl_max_dgram_qlen = 10;
2721 if (unix_sysctl_register(net))
2722 goto out;
2723
2724 #ifdef CONFIG_PROC_FS
2725 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2726 unix_sysctl_unregister(net);
2727 goto out;
2728 }
2729 #endif
2730 error = 0;
2731 out:
2732 return error;
2733 }
2734
2735 static void __net_exit unix_net_exit(struct net *net)
2736 {
2737 unix_sysctl_unregister(net);
2738 remove_proc_entry("unix", net->proc_net);
2739 }
2740
2741 static struct pernet_operations unix_net_ops = {
2742 .init = unix_net_init,
2743 .exit = unix_net_exit,
2744 };
2745
2746 static int __init af_unix_init(void)
2747 {
2748 int rc = -1;
2749
2750 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2751
2752 rc = proto_register(&unix_proto, 1);
2753 if (rc != 0) {
2754 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2755 __func__);
2756 goto out;
2757 }
2758
2759 sock_register(&unix_family_ops);
2760 register_pernet_subsys(&unix_net_ops);
2761 out:
2762 return rc;
2763 }
2764
2765 static void __exit af_unix_exit(void)
2766 {
2767 sock_unregister(PF_UNIX);
2768 proto_unregister(&unix_proto);
2769 unregister_pernet_subsys(&unix_net_ops);
2770 }
2771
/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket. But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
2776 fs_initcall(af_unix_init);
2777 module_exit(af_unix_exit);
2778
2779 MODULE_LICENSE("GPL");
2780 MODULE_ALIAS_NETPROTO(PF_UNIX);