Merge tag 'v3.10.92' into update
author	Stricted <info@stricted.net>
	Wed, 21 Mar 2018 21:49:35 +0000 (22:49 +0100)
committer	Stricted <info@stricted.net>
	Wed, 21 Mar 2018 21:49:35 +0000 (22:49 +0100)
This is the 3.10.92 stable release

Makefile
include/net/af_unix.h
include/net/sock.h
kernel/workqueue.c
net/core/ethtool.c
net/unix/af_unix.c

diff --combined Makefile
index 96feafbd023bc5418f24e40a1160d0a851bcb01d,25701b67bb6d932f732d709288c6e7478f0a176a..b851265d9587101dfbca619ed7698350b68230d2
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 91
+ SUBLEVEL = 92
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -374,7 -374,7 +374,7 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
                   -fno-delete-null-pointer-checks \
 -                 -std=gnu89
 +                 -w -std=gnu89
  
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
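Worth noting about the hunk above: the vendor side adds -w, which suppresses every compiler warning tree-wide, masking even the diagnostics that -Werror-implicit-function-declaration on the line above was meant to keep fatal. A narrower approach would be to disable only the specific noisy warnings with individual -Wno-* flags.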
diff --combined include/net/af_unix.h
index 9120783132e71c5d653e85c6b300507b579457b7,e927d3e80b61597da1a2591c8ffee0a2ad92d1f6..6867600245725a22f4431533f1d35878fbe11bed
@@@ -62,9 -62,12 +62,13 @@@ struct unix_sock 
  #define UNIX_GC_CANDIDATE     0
  #define UNIX_GC_MAYBE_CYCLE   1
        struct socket_wq        peer_wq;
 +      wait_queue_t            peer_wake;
  };
- #define unix_sk(__sk) ((struct unix_sock *)__sk)
+ static inline struct unix_sock *unix_sk(struct sock *sk)
+ {
+       return (struct unix_sock *)sk;
+ }
  
  #define peer_wait peer_wq.wait
  
diff --combined include/net/sock.h
index 02199accb6be2b9ddeffdef24dc001c686b2e9d5,95dc0c8a9dac078d640bec98170b8eb81b8d7cc5..3547a6f347d14fd4de56a4d00c1d5c0d78dc4bec
@@@ -780,12 -780,16 +780,21 @@@ static inline __must_check int sk_add_b
                                              unsigned int limit)
  {
        if (sk_rcvqueues_full(sk, skb, limit))
 +      {
 +              #ifdef CONFIG_MTK_NET_LOGGING 
 +              printk(KERN_ERR "[mtk_net][sock]sk_add_backlog->sk_rcvqueues_full sk->sk_rcvbuf:%d,sk->sk_sndbuf:%d ",sk->sk_rcvbuf,sk->sk_sndbuf);
 +              #endif          
                return -ENOBUFS;
 +      }
+       /*
+        * If the skb was allocated from pfmemalloc reserves, only
+        * allow SOCK_MEMALLOC sockets to use it as this socket is
+        * helping free memory
+        */
+       if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+               return -ENOMEM;
        __sk_add_backlog(sk, skb);
        sk->sk_backlog.len += skb->truesize;
        return 0;
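The upstream side of this hunk refuses pfmemalloc skbs on ordinary sockets: buffers drawn from the emergency reserves may only be consumed by SOCK_MEMALLOC sockets (the ones helping to free memory, e.g. swap over network), otherwise the reserve would leak to normal traffic. The MTK side adds an unthrottled error printk on backlog overflow; a gentler variant would rate-limit it — a sketch, using the standard net_ratelimit() helper:

    if (sk_rcvqueues_full(sk, skb, limit)) {
            if (net_ratelimit())
                    pr_err("[mtk_net][sock] backlog full: rcvbuf=%d sndbuf=%d\n",
                           sk->sk_rcvbuf, sk->sk_sndbuf);
            return -ENOBUFS;
    }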
diff --combined kernel/workqueue.c
index 1c020389445b25383adc76d557aad8a75a5b2067,fa927fd5778d5468a504b82c193043589a1dc0c2..3391a5cd0c583773b77c44ab58ca236e0659f237
@@@ -507,13 -507,6 +507,13 @@@ static inline void debug_work_activate(
  static inline void debug_work_deactivate(struct work_struct *work) { }
  #endif
  
 +#ifdef CONFIG_MTK_WQ_DEBUG
 +extern void mttrace_workqueue_execute_work(struct work_struct *work);
 +extern void mttrace_workqueue_activate_work(struct work_struct *work);
 +extern void mttrace_workqueue_queue_work(unsigned int req_cpu, struct work_struct *work);
 +extern void mttrace_workqueue_execute_end(struct work_struct *work);
 +#endif //CONFIG_MTK_WQ_DEBUG
 +
  /* allocate ID and assign it to @pool */
  static int worker_pool_assign_id(struct worker_pool *pool)
  {
@@@ -1084,9 -1077,6 +1084,9 @@@ static void pwq_activate_delayed_work(s
        struct pool_workqueue *pwq = get_work_pwq(work);
  
        trace_workqueue_activate_work(work);
 +#ifdef CONFIG_MTK_WQ_DEBUG
 +      mttrace_workqueue_activate_work(work);
 +#endif //CONFIG_MTK_WQ_DEBUG
        move_linked_works(work, &pwq->pool->worklist, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        pwq->nr_active++;
@@@ -1374,9 -1364,6 +1374,9 @@@ retry
  
        /* pwq determined, queue */
        trace_workqueue_queue_work(req_cpu, pwq, work);
 +#ifdef CONFIG_MTK_WQ_DEBUG
 +      mttrace_workqueue_queue_work(cpu, work);
 +#endif //CONFIG_MTK_WQ_DEBUG
  
        if (WARN_ON(!list_empty(&work->entry))) {
                spin_unlock(&pwq->pool->lock);
  
        if (likely(pwq->nr_active < pwq->max_active)) {
                trace_workqueue_activate_work(work);
 +#ifdef CONFIG_MTK_WQ_DEBUG
 +              mttrace_workqueue_activate_work(work);
 +#endif //CONFIG_MTK_WQ_DEBUG
                pwq->nr_active++;
                worklist = &pwq->pool->worklist;
        } else {
@@@ -1466,13 -1450,13 +1466,13 @@@ static void __queue_delayed_work(int cp
        timer_stats_timer_set_start_info(&dwork->timer);
  
        dwork->wq = wq;
+       /* timer isn't guaranteed to run in this cpu, record earlier */
+       if (cpu == WORK_CPU_UNBOUND)
+               cpu = raw_smp_processor_id();
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
  
-       if (unlikely(cpu != WORK_CPU_UNBOUND))
-               add_timer_on(timer, cpu);
-       else
-               add_timer(timer);
+       add_timer_on(timer, cpu);
  }
  
  /**
@@@ -2119,9 -2103,6 +2119,9 @@@ __acquires(&pool->lock
        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
        int work_color;
        struct worker *collision;
 +      unsigned long long exec_start;
 +      char func[128];
 +
  #ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the struct work_struct from
  
        lock_map_acquire_read(&pwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
 +
 +      exec_start = sched_clock();
 +      sprintf(func, "%pf", work->func);
 +
        trace_workqueue_execute_start(work);
 +#ifdef CONFIG_MTK_WQ_DEBUG
 +      mttrace_workqueue_execute_work(work);
 +#endif //CONFIG_MTK_WQ_DEBUG
 +
        worker->current_func(work);
 +              
        /*
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
         */
        trace_workqueue_execute_end(work);
 +#ifdef CONFIG_MTK_WQ_DEBUG
 +      mttrace_workqueue_execute_end(work);
 +#endif //CONFIG_MTK_WQ_DEBUG
 +
 +	/* dump a log if the work item executed for more than 1 sec */
 +	if ((sched_clock() - exec_start) > 1000000000)
 +		pr_warning("WQ warning! work (%s, %p) execute more than 1 sec, time: %llu ns\n", func, work, sched_clock() - exec_start);
 +
        lock_map_release(&lockdep_map);
        lock_map_release(&pwq->wq->lockdep_map);
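The MTK instrumentation above snapshots the work function's name before invoking it (the work item may be freed by its own handler, as the lockdep comment notes) and warns when a single item runs longer than one second. Two details are worth flagging: sprintf into the 128-byte buffer is unbounded for long symbol names, and sched_clock() is re-read for the message, so the printed delta differs from the one tested. A bounded, single-read sketch:

    unsigned long long exec_start, delta;
    char func[128];

    exec_start = sched_clock();
    snprintf(func, sizeof(func), "%pf", work->func);   /* bounded copy */
    worker->current_func(work);
    delta = sched_clock() - exec_start;
    if (delta > NSEC_PER_SEC)                          /* 1000000000 ns */
            pr_warn("work (%s, %p) ran for %llu ns\n", func, work, delta);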
  
diff --combined net/core/ethtool.c
index 900a05fd90d2e39338dcdcecab5c28cf01d48107,213b612551400f1babd6e79930d0a997120ca1e9..ae2f65629e52f174542921b4b8466fe3633ef818
@@@ -711,13 -711,11 +711,13 @@@ static int ethtool_reset(struct net_dev
  
  static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
  {
 -      struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
 +      struct ethtool_wolinfo wol;
  
        if (!dev->ethtool_ops->get_wol)
                return -EOPNOTSUPP;
  
 +      memset(&wol, 0, sizeof(struct ethtool_wolinfo));
 +      wol.cmd = ETHTOOL_GWOL;
        dev->ethtool_ops->get_wol(dev, &wol);
  
        if (copy_to_user(useraddr, &wol, sizeof(wol)))
@@@ -1068,7 -1066,7 +1068,7 @@@ static int ethtool_get_strings(struct n
  
        gstrings.len = ret;
  
-       data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+       data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
        if (!data)
                return -ENOMEM;
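The kcalloc() change matters because gstrings.len comes from the driver: with kmalloc(gstrings.len * ETH_GSTRING_LEN, ...) the multiplication can wrap, yielding an undersized allocation that the subsequent string copy overruns, whereas kcalloc() fails cleanly on overflow (and zeroes the buffer). Illustrative numbers for a 32-bit size_t:

    /* 0x08000001 * 32 == 0x100000020, which truncates to 0x20:
     * a 32-byte buffer for what looks like a ~4 GiB request */
    data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);   /* old: unchecked */
    data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);    /* new: overflow-checked */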
  
diff --combined net/unix/af_unix.c
index 8ccc661efa5304978a559f8b9b7c5c7e4a302dba,825c029bf0927e78ed520aa701850bcc6a92c37f..8db136a9d87b05ad1195402223028236f30326ce
  #include <linux/mount.h>
  #include <net/checksum.h>
  #include <linux/security.h>
 +#include <linux/freezer.h>
 +
 +
 +#include <linux/uio.h>
 +#include <linux/blkdev.h>
 +#include <linux/compat.h>
 +#include <linux/rtc.h>
 +#include <asm/kmap_types.h>
 +#include <linux/device.h>
 +
  
  struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
  EXPORT_SYMBOL_GPL(unix_socket_table);
@@@ -144,17 -134,6 +144,17 @@@ static struct hlist_head *unix_sockets_
  
  #define UNIX_ABSTRACT(sk)     (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
  
 +
 +//for aee interface start
 +#define __UNIX_SOCKET_OUTPUT_BUF_SIZE__   3500
 +static struct proc_dir_entry *gunix_socket_track_aee_entry = NULL;
 +#define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
 +#define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
 +
 +static volatile unsigned int unix_sock_track_stop_flag = 0;
 +#define unix_peer(sk) (unix_sk(sk)->peer)
 +
 +
  #ifdef CONFIG_SECURITY_NETWORK
  static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
  {
@@@ -187,7 -166,7 +187,7 @@@ static inline unsigned int unix_hash_fo
        return hash&(UNIX_HASH_SIZE-1);
  }
  
 -#define unix_peer(sk) (unix_sk(sk)->peer)
 +
  
  static inline int unix_our_peer(struct sock *sk, struct sock *osk)
  {
@@@ -334,118 -313,6 +334,118 @@@ found
        return s;
  }
  
 +/* Support code for asymmetrically connected dgram sockets
 + *
 + * If a datagram socket is connected to a socket not itself connected
 + * to the first socket (e.g., /dev/log), clients may only enqueue more
 + * messages if the present receive queue of the server socket is not
 + * "too large". This means there's a second writeability condition
 + * poll and sendmsg need to test. The dgram recv code will do a wake
 + * up on the peer_wait wait queue of a socket upon reception of a
 + * datagram which needs to be propagated to sleeping would-be writers
 + * since these might not have sent anything so far. This can't be
 + * accomplished via poll_wait because the lifetime of the server
 + * socket might be less than that of its clients if these break their
 + * association with it or if the server socket is closed while clients
 + * are still connected to it, and there's no way to inform "a polling
 + * implementation" that it should let go of a certain wait queue.
 + *
 + * In order to propagate a wake up, a wait_queue_t of the client
 + * socket is enqueued on the peer_wait queue of the server socket
 + * whose wake function does a wake_up on the ordinary client socket
 + * wait queue. This connection is established whenever a write (or
 + * poll for write) hits the flow control condition and is broken when
 + * the association to the server socket is dissolved or after a wake
 + * up was relayed.
 + */
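In short: the client embeds a wait_queue_t (the peer_wake member added to struct unix_sock above) whose wake function relays the server's readiness to the client's own sleep queue, then unhooks itself so a dead server can never strand it. The life cycle, in terms of the helpers defined just below:

    /* writer hits flow control: hook into the server's peer_wait */
    unix_dgram_peer_wake_connect(sk, other);

    /* a reader drains the server queue: peer_wait is woken,
     * unix_dgram_peer_wake_relay() forwards the POLLOUT wakeup to
     * sk_sleep(sk) and removes itself from peer_wait */

    /* writer disconnects or is released: explicit unhook */
    unix_dgram_peer_wake_disconnect(sk, other);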
 +
 +static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
 +                                    void *key)
 +{
 +      struct unix_sock *u;
 +      wait_queue_head_t *u_sleep;
 +
 +      u = container_of(q, struct unix_sock, peer_wake);
 +
 +      __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 +                          q);
 +      u->peer_wake.private = NULL;
 +
 +      /* relaying can only happen while the wq still exists */
 +      u_sleep = sk_sleep(&u->sk);
 +      if (u_sleep)
 +              wake_up_interruptible_poll(u_sleep, key);
 +
 +      return 0;
 +}
 +
 +static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 +{
 +      struct unix_sock *u, *u_other;
 +      int rc;
 +
 +      u = unix_sk(sk);
 +      u_other = unix_sk(other);
 +      rc = 0;
 +      spin_lock(&u_other->peer_wait.lock);
 +
 +      if (!u->peer_wake.private) {
 +              u->peer_wake.private = other;
 +              __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 +
 +              rc = 1;
 +      }
 +
 +      spin_unlock(&u_other->peer_wait.lock);
 +      return rc;
 +}
 +
 +static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 +                                          struct sock *other)
 +{
 +      struct unix_sock *u, *u_other;
 +
 +      u = unix_sk(sk);
 +      u_other = unix_sk(other);
 +      spin_lock(&u_other->peer_wait.lock);
 +
 +      if (u->peer_wake.private == other) {
 +              __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 +              u->peer_wake.private = NULL;
 +      }
 +
 +      spin_unlock(&u_other->peer_wait.lock);
 +}
 +
 +static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 +                                                 struct sock *other)
 +{
 +      unix_dgram_peer_wake_disconnect(sk, other);
 +      wake_up_interruptible_poll(sk_sleep(sk),
 +                                 POLLOUT |
 +                                 POLLWRNORM |
 +                                 POLLWRBAND);
 +}
 +
 +/* preconditions:
 + *    - unix_peer(sk) == other
 + *    - association is stable
 + */
 +static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 +{
 +      int connected;
 +
 +      connected = unix_dgram_peer_wake_connect(sk, other);
 +
 +      if (unix_recvq_full(other))
 +              return 1;
 +
 +      if (connected)
 +              unix_dgram_peer_wake_disconnect(sk, other);
 +
 +      return 0;
 +}
 +
  static inline int unix_writable(struct sock *sk)
  {
        return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
@@@ -497,9 -364,7 +497,9 @@@ static void unix_sock_destructor(struc
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
 -              printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
 +              #ifdef CONFIG_MTK_NET_LOGGING 
 +              printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
 +              #endif
                return;
        }
  
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
 -#ifdef UNIX_REFCNT_DEBUG
 -      printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
 +    #ifdef UNIX_REFCNT_DEBUG
 +      printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
                atomic_long_read(&unix_nr_socks));
 -#endif
 +    #endif
  }
  
  static void unix_release_sock(struct sock *sk, int embrion)
                        skpair->sk_state_change(skpair);
                        sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                }
 +
 +              unix_dgram_peer_wake_disconnect(sk, skpair);
                sock_put(skpair); /* It may now die */
                unix_peer(sk) = NULL;
        }
@@@ -635,7 -498,6 +635,7 @@@ out_unlock
        unix_state_unlock(sk);
        put_pid(old_pid);
  out:
 +   
        return err;
  }
  
@@@ -788,7 -650,6 +788,7 @@@ static struct sock *unix_create1(struc
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->readlock); /* single task reading lock */
        init_waitqueue_head(&u->peer_wait);
 +      init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
  out:
        if (sk == NULL)
@@@ -1035,8 -896,7 +1035,8 @@@ static int unix_bind(struct socket *soc
        atomic_set(&addr->refcnt, 1);
  
        if (sun_path[0]) {
 -              struct path path;
 +              struct path path;      
 +
                umode_t mode = S_IFSOCK |
                       (SOCK_INODE(sock)->i_mode & ~current_umask());
                err = unix_mknod(sun_path, mode, &path);
@@@ -1073,7 -933,6 +1073,7 @@@ out_unlock
  out_up:
        mutex_unlock(&u->readlock);
  out:
 + 
        return err;
  }
  
@@@ -1113,7 -972,6 +1113,7 @@@ static int unix_dgram_connect(struct so
        int err;
  
        if (addr->sa_family != AF_UNSPEC) {
 +     
                err = unix_mkname(sunaddr, alen, &hash);
                if (err < 0)
                        goto out;
@@@ -1159,8 -1017,6 +1159,8 @@@ restart
        if (unix_peer(sk)) {
                struct sock *old_peer = unix_peer(sk);
                unix_peer(sk) = other;
 +              unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
 +
                unix_state_double_unlock(sk, other);
  
                if (other != old_peer)
                unix_peer(sk) = other;
                unix_state_double_unlock(sk, other);
        }
 +      
 +#ifdef CONFIG_MTK_NET_LOGGING
 +	if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) &&
 +	    (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL)) {
 +		printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]:connect [%s] other[%lu]\n",
 +		       SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
 +	}
 +#endif
 +            
        return 0;
  
  out_unlock:
        unix_state_double_unlock(sk, other);
        sock_put(other);
  out:
 +     
        return err;
  }
  
@@@ -1367,17 -1214,8 +1367,17 @@@ restart
        __skb_queue_tail(&other->sk_receive_queue, skb);
        spin_unlock(&other->sk_receive_queue.lock);
        unix_state_unlock(other);
 +      
 +	#ifdef CONFIG_MTK_NET_LOGGING
 +	if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) &&
 +	    (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL)) {
 +		printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu ]: connect [%s] other[%lu] \n",
 +		       SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
 +	}
 +	#endif
 +
        other->sk_data_ready(other, 0);
        sock_put(other);
 +       
        return 0;
  
  out_unlock:
@@@ -1390,7 -1228,6 +1390,7 @@@ out
                unix_release_sock(newsk, 0);
        if (other)
                sock_put(other);
 +    
        return err;
  }
  
@@@ -1442,7 -1279,7 +1442,7 @@@ static int unix_accept(struct socket *s
        /* If socket state is TCP_LISTEN it cannot change (for now...),
         * so that no locks are necessary.
         */
 -
 +    
        skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
        if (!skb) {
                /* This means receive shutdown. */
        unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
 +    
        return 0;
  
  out:
 +    
        return err;
  }
  
@@@ -1621,8 -1456,7 +1621,8 @@@ static int unix_dgram_sendmsg(struct ki
        struct scm_cookie tmp_scm;
        int max_level;
        int data_len = 0;
 -
 +      int sk_locked;
 +       
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
        wait_for_unix_gc();
@@@ -1698,14 -1532,12 +1698,14 @@@ restart
                goto out_free;
        }
  
 +      sk_locked = 0;
        unix_state_lock(other);
 +restart_locked:
        err = -EPERM;
        if (!unix_may_send(sk, other))
                goto out_unlock;
  
 -      if (sock_flag(other, SOCK_DEAD)) {
 +      if (unlikely(sock_flag(other, SOCK_DEAD))) {
                /*
                 *      Check with 1003.1g - what should
                 *      datagram error
                unix_state_unlock(other);
                sock_put(other);
  
 -              err = 0;
 +              if (!sk_locked)
                unix_state_lock(sk);
                if (unix_peer(sk) == other) {
                        unix_peer(sk) = NULL;
 +                      unix_dgram_peer_wake_disconnect_wakeup(sk, other);
 +
                        unix_state_unlock(sk);
  
                        unix_dgram_disconnected(sk, other);
                        goto out_unlock;
        }
  
 -      if (unix_peer(other) != sk && unix_recvq_full(other)) {
 -              if (!timeo) {
 -                      err = -EAGAIN;
 -                      goto out_unlock;
 +      /* other == sk && unix_peer(other) != sk if
 +       * - unix_peer(sk) == NULL, destination address bound to sk
 +       * - unix_peer(sk) == sk by time of get but disconnected before lock
 +       */
 +      if (other != sk &&
 +          unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
 +              if (timeo) {
 +                      timeo = unix_wait_for_peer(other, timeo);
 +
 +                      err = sock_intr_errno(timeo);
 +                      if (signal_pending(current))
 +                              goto out_free;
 +
 +                      goto restart;
                }
  
 -              timeo = unix_wait_for_peer(other, timeo);
 +              if (!sk_locked) {
 +                      unix_state_unlock(other);
 +                      unix_state_double_lock(sk, other);
 +              }
  
 -              err = sock_intr_errno(timeo);
 -              if (signal_pending(current))
 -                      goto out_free;
 +              if (unix_peer(sk) != other ||
 +                  unix_dgram_peer_wake_me(sk, other)) {
 +                      err = -EAGAIN;
 +                      sk_locked = 1;
 +                      goto out_unlock;
 +              }
  
 -              goto restart;
 +              if (!sk_locked) {
 +                      sk_locked = 1;
 +                      goto restart_locked;
 +              }
        }
  
 +      if (unlikely(sk_locked))
 +              unix_state_unlock(sk);
 +
        if (sock_flag(other, SOCK_RCVTSTAMP))
                __net_timestamp(skb);
        maybe_add_creds(skb, sock, other);
        other->sk_data_ready(other, len);
        sock_put(other);
        scm_destroy(siocb->scm);
 +    
        return len;
  
  out_unlock:
 +      if (sk_locked)
 +              unix_state_unlock(sk);
        unix_state_unlock(other);
  out_free:
        kfree_skb(skb);
@@@ -1804,7 -1609,6 +1804,7 @@@ out
        if (other)
                sock_put(other);
        scm_destroy(siocb->scm);
 +      
        return err;
  }
  
@@@ -1824,7 -1628,6 +1824,7 @@@ static int unix_stream_sendmsg(struct k
  
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
 +              
        wait_for_unix_gc();
        err = scm_send(sock, msg, siocb->scm, false);
        if (err < 0)
  
                skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
                                          &err);
 +              
  
                if (skb == NULL)
                        goto out_err;
  
                if (sock_flag(other, SOCK_DEAD) ||
                    (other->sk_shutdown & RCV_SHUTDOWN))
 +		{
 +			if (other->sk_socket) {
 +				if (sk->sk_socket) {
 +					#ifdef CONFIG_MTK_NET_LOGGING
 +					printk(KERN_INFO " [mtk_net][unix]: sendmsg[%lu:%lu]:peer close\n",
 +					       SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
 +					#endif
 +				} else {
 +					#ifdef CONFIG_MTK_NET_LOGGING
 +					printk(KERN_INFO " [mtk_net][unix]: sendmsg[null:%lu]:peer close\n",
 +					       SOCK_INODE(other->sk_socket)->i_ino);
 +					#endif
 +				}
 +			} else {
 +				#ifdef CONFIG_MTK_NET_LOGGING
 +				printk(KERN_INFO " [mtk_net][unix]: sendmsg:peer close \n");
 +				#endif
 +			}
 +
                        goto pipe_err_free;
 +		}
  
                maybe_add_creds(skb, sock, other);
                skb_queue_tail(&other->sk_receive_queue, skb);
@@@ -1954,7 -1730,6 +1954,7 @@@ pipe_err
  out_err:
        scm_destroy(siocb->scm);
        siocb->scm = NULL;
 +        
        return sent ? : err;
  }
  
@@@ -2096,7 -1871,6 +2096,7 @@@ out_free
  out_unlock:
        mutex_unlock(&u->readlock);
  out:
 +      
        return err;
  }
  
@@@ -2122,7 -1896,7 +2122,7 @@@ static long unix_stream_data_wait(struc
  
                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                unix_state_unlock(sk);
 -              timeo = schedule_timeout(timeo);
 +              timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
  
                if (sock_flag(sk, SOCK_DEAD))
@@@ -2152,7 -1926,6 +2152,7 @@@ static int unix_stream_recvmsg(struct k
        int err = 0;
        long timeo;
        int skip;
 +      struct sock * other = unix_peer(sk);
  
        err = -EINVAL;
        if (sk->sk_state != TCP_ESTABLISHED)
@@@ -2207,27 -1980,8 +2207,27 @@@ again
                        if (err)
                                goto unlock;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
 +			{
 +				if (sk && sk->sk_socket) {
 +					if (other && other->sk_socket) {
 +						#ifdef CONFIG_MTK_NET_LOGGING
 +						printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to peer shutdown  \n",
 +						       SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
 +						#endif
 +					} else {
 +						#ifdef CONFIG_MTK_NET_LOGGING
 +						printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]:exit read due to peer shutdown  \n",
 +						       SOCK_INODE(sk->sk_socket)->i_ino);
 +						#endif
 +					}
 +				} else {
 +					#ifdef CONFIG_MTK_NET_LOGGING
 +					printk(KERN_INFO " [mtk_net][unix]: recvmsg: exit read due to peer shutdown \n");
 +					#endif
 +				}
                                goto unlock;
 -
 +			}
                        unix_state_unlock(sk);
                        err = -EAGAIN;
                        if (!timeo)
                        mutex_unlock(&u->readlock);
  
                        timeo = unix_stream_data_wait(sk, timeo, last);
 +			if (!timeo) {
 +				if (sk && sk->sk_socket) {
 +					if (other && other->sk_socket) {
 +						#ifdef CONFIG_MTK_NET_LOGGING
 +						printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to timeout  \n",
 +						       SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
 +						#endif
 +					} else {
 +						#ifdef CONFIG_MTK_NET_LOGGING
 +						printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:null]:exit read due to timeout  \n",
 +						       SOCK_INODE(sk->sk_socket)->i_ino);
 +						#endif
 +					}
 +				} else {
 +					#ifdef CONFIG_MTK_NET_LOGGING
 +					printk(KERN_INFO " [mtk_net][unix]: recvmsg:exit read due to timeout \n");
 +					#endif
 +				}
 +			}
  
                        if (signal_pending(current)
                            ||  mutex_lock_interruptible(&u->readlock)) {
                        if (UNIXCB(skb).fp)
                                siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
  
-                       sk_peek_offset_fwd(sk, chunk);
+                       if (skip) {
+                               sk_peek_offset_fwd(sk, chunk);
+                               skip -= chunk;
+                       }
+                       if (UNIXCB(skb).fp)
+                               break;
  
+                       last = skb;
+                       unix_state_lock(sk);
+                       skb = skb_peek_next(skb, &sk->sk_receive_queue);
+                       if (skb)
+                               goto again;
+                       unix_state_unlock(sk);
                        break;
                }
        } while (size);
        mutex_unlock(&u->readlock);
        scm_recv(sock, msg, siocb->scm, flags);
  out:
 +  
        return copied ? : err;
  }
  
@@@ -2509,29 -2252,22 +2521,29 @@@ static unsigned int unix_dgram_poll(str
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
 +		{
                        return mask;
 -      }
 +		}
 +	}
  
        /* No write status requested, avoid expensive OUT tests. */
        if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
 +	{
                return mask;
 +	}
  
        writable = unix_writable(sk);
 -      other = unix_peer_get(sk);
 -      if (other) {
 -              if (unix_peer(other) != sk) {
 -                      sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
 -                      if (unix_recvq_full(other))
 -                              writable = 0;
 -              }
 -              sock_put(other);
 +      if (writable) {
 +              unix_state_lock(sk);
 +
 +              other = unix_peer(sk);
 +              if (other && unix_peer(other) != sk &&
 +                  unix_recvq_full(other) &&
 +                  unix_dgram_peer_wake_me(sk, other))
 +                      writable = 0;
 +
 +              unix_state_unlock(sk);
        }
  
        if (writable)