Merge tag 'v3.10.81' into update
author     Stricted <info@stricted.net>
           Wed, 21 Mar 2018 21:45:35 +0000 (22:45 +0100)
committer  Stricted <info@stricted.net>
           Wed, 21 Mar 2018 21:45:35 +0000 (22:45 +0100)
This is the 3.10.81 stable release

Makefile
block/genhd.c
net/ipv4/route.c
net/unix/af_unix.c

diff --combined Makefile
index 2a1fc8777d7ab05206e8a76c351d415880c8b3e5,6d19e37d36d593d08762317d7ae7237b7fdfb869..b89368579128288b7b386924e4feb1f53d875c9f
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 80
+ SUBLEVEL = 81
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -374,7 -374,7 +374,7 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
                   -fno-delete-null-pointer-checks \
 -                 -std=gnu89
 +                 -w -std=gnu89
  
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
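
Note that the stable merge itself only bumps SUBLEVEL from 80 to 81; the `-w` added to KBUILD_CFLAGS above comes from the local (non-upstream) side of the merge and suppresses all GCC warnings tree-wide. As a rough, hypothetical illustration of what `-w` hides (not code from this tree): with -Wall and optimization enabled, GCC flags the use of an uninitialized variable below, while -w compiles it silently.

	/* hypothetical example, not from this tree: with -Wall and -O2,
	 * GCC warns that 's' may be used uninitialized; with -w it is silent */
	int sum_first(const int *v, int n)
	{
		int s;	/* never initialized */
		int i;

		for (i = 0; i < n; i++)
			s += v[i];
		return s;
	}
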
diff --combined block/genhd.c
index 7d63cc198afcecc0cbcf227aa6c7b25ce68600e6,b09f5fc94dee3f8cb2de7e277f82098a9d95b02c..57afc91c4a0503557d32eba2ccd9fd106ed7c60f
@@@ -17,8 -17,6 +17,8 @@@
  #include <linux/kobj_map.h>
  #include <linux/mutex.h>
  #include <linux/idr.h>
 +#include <linux/ctype.h>
 +#include <linux/fs_uuid.h>
  #include <linux/log2.h>
  #include <linux/pm_runtime.h>
  
@@@ -424,9 -422,9 +424,9 @@@ int blk_alloc_devt(struct hd_struct *pa
        /* allocate ext devt */
        idr_preload(GFP_KERNEL);
  
-       spin_lock(&ext_devt_lock);
+       spin_lock_bh(&ext_devt_lock);
        idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
-       spin_unlock(&ext_devt_lock);
+       spin_unlock_bh(&ext_devt_lock);
  
        idr_preload_end();
        if (idx < 0)
@@@ -451,9 -449,9 +451,9 @@@ void blk_free_devt(dev_t devt
                return;
  
        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-               spin_lock(&ext_devt_lock);
+               spin_lock_bh(&ext_devt_lock);
                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               spin_unlock(&ext_devt_lock);
+               spin_unlock_bh(&ext_devt_lock);
        }
  }
  
@@@ -693,13 -691,13 +693,13 @@@ struct gendisk *get_gendisk(dev_t devt
        } else {
                struct hd_struct *part;
  
-               spin_lock(&ext_devt_lock);
+               spin_lock_bh(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
                if (part && get_disk(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
-               spin_unlock(&ext_devt_lock);
+               spin_unlock_bh(&ext_devt_lock);
        }
  
        return disk;
@@@ -831,7 -829,6 +831,7 @@@ static void disk_seqf_stop(struct seq_f
        if (iter) {
                class_dev_iter_exit(iter);
                kfree(iter);
 +              seqf->private = NULL;
        }
  }
  
@@@ -1119,22 -1116,6 +1119,22 @@@ static void disk_release(struct device 
                blk_put_queue(disk->queue);
        kfree(disk);
  }
 +
 +static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
 +{
 +      struct gendisk *disk = dev_to_disk(dev);
 +      struct disk_part_iter piter;
 +      struct hd_struct *part;
 +      int cnt = 0;
 +
 +      disk_part_iter_init(&piter, disk, 0);
 +      while ((part = disk_part_iter_next(&piter)))
 +              cnt++;
 +      disk_part_iter_exit(&piter);
 +      add_uevent_var(env, "NPARTS=%u", cnt);
 +      return 0;
 +}
 +
  struct class block_class = {
        .name           = "block",
  };
@@@ -1154,7 -1135,6 +1154,7 @@@ static struct device_type disk_type = 
        .groups         = disk_attr_groups,
        .release        = disk_release,
        .devnode        = block_devnode,
 +      .uevent         = disk_uevent,
  };
  
  #ifdef CONFIG_PROC_FS
@@@ -1405,87 -1385,6 +1405,87 @@@ int invalidate_partition(struct gendis
  
  EXPORT_SYMBOL(invalidate_partition);
  
 +dev_t blk_lookup_fs_info(struct fs_info *seek)
 +{
 +      dev_t devt = MKDEV(0, 0);
 +      struct class_dev_iter iter;
 +      struct device *dev;
 +      int best_score = 0;
 +
 +      class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 +      while (best_score < 3 && (dev = class_dev_iter_next(&iter))) {
 +              struct gendisk *disk = dev_to_disk(dev);
 +              struct disk_part_iter piter;
 +              struct hd_struct *part;
 +
 +              disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 +
 +              while (best_score < 3 && (part = disk_part_iter_next(&piter))) {
 +                      int score = part_matches_fs_info(part, seek);
 +                      if (score > best_score) {
 +                              devt = part_devt(part);
 +                              best_score = score;
 +                      }
 +              }
 +              disk_part_iter_exit(&piter);
 +      }
 +      class_dev_iter_exit(&iter);
 +      return devt;
 +}
 +EXPORT_SYMBOL_GPL(blk_lookup_fs_info);
 +
 +/* Caller uses NULL, key to start. For each match found, we return a bdev on
 + * which we have done blkdev_get, and we do the blkdev_put on block devices
 + * that are passed to us. When no more matches are found, we return NULL.
 + */
 +struct block_device *next_bdev_of_type(struct block_device *last,
 +      const char *key)
 +{
 +      dev_t devt = MKDEV(0, 0);
 +      struct class_dev_iter iter;
 +      struct device *dev;
 +      struct block_device *next = NULL, *bdev;
 +      int got_last = 0;
 +
 +      if (!key)
 +              goto out;
 +
 +      class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 +      while (!devt && (dev = class_dev_iter_next(&iter))) {
 +              struct gendisk *disk = dev_to_disk(dev);
 +              struct disk_part_iter piter;
 +              struct hd_struct *part;
 +
 +              disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 +
 +              while ((part = disk_part_iter_next(&piter))) {
 +                      bdev = bdget(part_devt(part));
 +                      if (last && !got_last) {
 +                              if (last == bdev)
 +                                      got_last = 1;
 +                              continue;
 +                      }
 +
 +                      if (blkdev_get(bdev, FMODE_READ, 0))
 +                              continue;
 +
 +                      if (bdev_matches_key(bdev, key)) {
 +                              next = bdev;
 +                              break;
 +                      }
 +
 +                      blkdev_put(bdev, FMODE_READ);
 +              }
 +              disk_part_iter_exit(&piter);
 +      }
 +      class_dev_iter_exit(&iter);
 +out:
 +      if (last)
 +              blkdev_put(last, FMODE_READ);
 +      return next;
 +}
 +EXPORT_SYMBOL_GPL(next_bdev_of_type);
 +
  /*
   * Disk events - monitor disk events like media change and eject request.
   */
@@@ -1506,17 -1405,11 +1506,17 @@@ struct disk_events 
  static const char *disk_events_strs[] = {
        [ilog2(DISK_EVENT_MEDIA_CHANGE)]        = "media_change",
        [ilog2(DISK_EVENT_EJECT_REQUEST)]       = "eject_request",
 +#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
 +      [ilog2(DISK_EVENT_MEDIA_DISAPPEAR)]     = "media_disappear",
 +#endif
  };
  
  static char *disk_uevents[] = {
        [ilog2(DISK_EVENT_MEDIA_CHANGE)]        = "DISK_MEDIA_CHANGE=1",
        [ilog2(DISK_EVENT_EJECT_REQUEST)]       = "DISK_EJECT_REQUEST=1",
 +#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
 +      [ilog2(DISK_EVENT_MEDIA_DISAPPEAR)]     = "DISK_EVENT_MEDIA_DISAPPEAR=1",
 +#endif
  };
  
  /* list of all disk_events */
@@@ -1524,10 -1417,7 +1524,10 @@@ static DEFINE_MUTEX(disk_events_mutex)
  static LIST_HEAD(disk_events);
  
  /* disable in-kernel polling by default */
 -static unsigned long disk_events_dfl_poll_msecs       = 0;
 +//ALPS00319570, CL955952 merged back, begin
 +//static unsigned long disk_events_dfl_poll_msecs     = 0;    //original
 +static unsigned long disk_events_dfl_poll_msecs       = 2000;
 +//ALPS00319570, CL955952 merged back, end
  
  static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
  {
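
The stable-side change in this file converts ext_devt_lock to the spin_lock_bh()/spin_unlock_bh() variants because the ext devt idr is also reached from block-layer bottom-half (softirq) context: taking the lock in process context without disabling bottom halves lets a softirq on the same CPU re-take it and deadlock. A minimal sketch of the pattern, simplified from (and not identical to) the 3.10 call chains:

	/* simplified sketch, assuming one path runs in softirq context */
	static DEFINE_SPINLOCK(example_lock);

	static void process_context_path(void)
	{
		spin_lock_bh(&example_lock);	/* bottom halves off while held */
		/* ... idr_alloc()/idr_remove() on a shared idr ... */
		spin_unlock_bh(&example_lock);
	}

	static void softirq_context_path(void)	/* e.g. from I/O completion */
	{
		spin_lock(&example_lock);	/* already in BH context */
		/* ... idr_find() ... */
		spin_unlock(&example_lock);
	}

With a plain spin_lock() in process_context_path(), a softirq firing on the same CPU between lock and unlock would spin forever on a lock its own CPU already holds.
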
diff --combined net/ipv4/route.c
index 577801102a70c799156a4614e942d7a1d071876b,222e1b6141d359762d6536d029000fb3fb3e8725..52fceb025a7201abe30d9e03373056da1fc28c88
@@@ -515,7 -515,7 +515,7 @@@ void __ip_select_ident(struct iphdr *ip
  }
  EXPORT_SYMBOL(__ip_select_ident);
  
 -static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
 +static void __build_flow_key(struct flowi4 *fl4, struct sock *sk,
                             const struct iphdr *iph,
                             int oif, u8 tos,
                             u8 prot, u32 mark, int flow_flags)
        flowi4_init_output(fl4, oif, mark, tos,
                           RT_SCOPE_UNIVERSE, prot,
                           flow_flags,
 -                         iph->daddr, iph->saddr, 0, 0);
 +                         iph->daddr, iph->saddr, 0, 0,
 +                         sk ? sock_i_uid(sk) : 0);
  }
  
  static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
 -                             const struct sock *sk)
 +                             struct sock *sk)
  {
        const struct iphdr *iph = ip_hdr(skb);
        int oif = skb->dev->ifindex;
        __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
  }
  
 -static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
 +static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
  {
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk),
 -                         daddr, inet->inet_saddr, 0, 0);
 +                         daddr, inet->inet_saddr, 0, 0,
 +                         sock_i_uid(sk));
        rcu_read_unlock();
  }
  
 -static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
 +static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
                                 const struct sk_buff *skb)
  {
        if (skb)
@@@ -873,6 -871,10 +873,10 @@@ static int ip_error(struct sk_buff *skb
        bool send;
        int code;
  
+       /* IP on this device is disabled. */
+       if (!in_dev)
+               goto out;
        net = dev_net(rt->dst.dev);
        if (!IN_DEV_FORWARD(in_dev)) {
                switch (rt->dst.error) {
@@@ -973,9 -975,6 +977,9 @@@ void ipv4_update_pmtu(struct sk_buff *s
        struct flowi4 fl4;
        struct rtable *rt;
  
 +      if (!mark)
 +              mark = IP4_REPLY_MARK(net, skb->mark);
 +
        __build_flow_key(&fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, flow_flags);
        rt = __ip_route_output_key(net, &fl4);
@@@ -993,10 -992,6 +997,10 @@@ static void __ipv4_sk_update_pmtu(struc
        struct rtable *rt;
  
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 +
 +      if (!fl4.flowi4_mark)
 +              fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
 +
        rt = __ip_route_output_key(sock_net(sk), &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
@@@ -2310,11 -2305,6 +2314,11 @@@ static int rt_fill_info(struct net *net
            nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
                goto nla_put_failure;
  
 +      if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
 +          nla_put_u32(skb, RTA_UID,
 +                      from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
 +              goto nla_put_failure;
 +
        error = rt->dst.error;
  
        if (rt_is_input_route(rt)) {
@@@ -2364,7 -2354,6 +2368,7 @@@ static int inet_rtm_getroute(struct sk_
        int err;
        int mark;
        struct sk_buff *skb;
 +      kuid_t uid;
  
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
        if (err < 0)
        dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
        iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
        mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
 +      if (tb[RTA_UID])
 +              uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
 +      else
 +              uid = (iif ? INVALID_UID : current_uid());
  
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = dst;
        fl4.flowi4_tos = rtm->rtm_tos;
        fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
        fl4.flowi4_mark = mark;
 +      fl4.flowi4_uid = uid;
  
        if (iif) {
                struct net_device *dev;
diff --combined net/unix/af_unix.c
index 9b13da4e644d487f2475d4313563f73ddef47646,123c16419cbe91581fb5435ce39c9436d3c5a2e6..8ccc661efa5304978a559f8b9b7c5c7e4a302dba
  #include <linux/mount.h>
  #include <net/checksum.h>
  #include <linux/security.h>
 +#include <linux/freezer.h>
 +
 +
 +#include <linux/uio.h>
 +#include <linux/blkdev.h>
 +#include <linux/compat.h>
 +#include <linux/rtc.h>
 +#include <asm/kmap_types.h>
 +#include <linux/device.h>
 +
  
  struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
  EXPORT_SYMBOL_GPL(unix_socket_table);
@@@ -144,17 -134,6 +144,17 @@@ static struct hlist_head *unix_sockets_
  
  #define UNIX_ABSTRACT(sk)     (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
  
 +
 +//for aee interface start
 +#define __UNIX_SOCKET_OUTPUT_BUF_SIZE__   3500
 +static struct proc_dir_entry *gunix_socket_track_aee_entry = NULL;
 +#define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
 +#define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
 +
 +static volatile unsigned int unix_sock_track_stop_flag = 0;
 +#define unix_peer(sk) (unix_sk(sk)->peer)
 +
 +
  #ifdef CONFIG_SECURITY_NETWORK
  static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
  {
@@@ -187,7 -166,7 +187,7 @@@ static inline unsigned int unix_hash_fo
        return hash&(UNIX_HASH_SIZE-1);
  }
  
 -#define unix_peer(sk) (unix_sk(sk)->peer)
 +
  
  static inline int unix_our_peer(struct sock *sk, struct sock *osk)
  {
@@@ -334,118 -313,6 +334,118 @@@ found
        return s;
  }
  
 +/* Support code for asymmetrically connected dgram sockets
 + *
 + * If a datagram socket is connected to a socket not itself connected
 + * to the first socket (eg, /dev/log), clients may only enqueue more
 + * messages if the present receive queue of the server socket is not
 + * "too large". This means there's a second writeability condition
 + * poll and sendmsg need to test. The dgram recv code will do a wake
 + * up on the peer_wait wait queue of a socket upon reception of a
 + * datagram which needs to be propagated to sleeping would-be writers
 + * since these might not have sent anything so far. This can't be
 + * accomplished via poll_wait because the lifetime of the server
 + * socket might be less than that of its clients if these break their
 + * association with it or if the server socket is closed while clients
 + * are still connected to it and there's no way to inform "a polling
 + * implementation" that it should let go of a certain wait queue
 + *
 + * In order to propagate a wake up, a wait_queue_t of the client
 + * socket is enqueued on the peer_wait queue of the server socket
 + * whose wake function does a wake_up on the ordinary client socket
 + * wait queue. This connection is established whenever a write (or
 + * poll for write) hit the flow control condition and broken when the
 + * association to the server socket is dissolved or after a wake up
 + * was relayed.
 + */
 +
 +static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
 +                                    void *key)
 +{
 +      struct unix_sock *u;
 +      wait_queue_head_t *u_sleep;
 +
 +      u = container_of(q, struct unix_sock, peer_wake);
 +
 +      __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
 +                          q);
 +      u->peer_wake.private = NULL;
 +
 +      /* relaying can only happen while the wq still exists */
 +      u_sleep = sk_sleep(&u->sk);
 +      if (u_sleep)
 +              wake_up_interruptible_poll(u_sleep, key);
 +
 +      return 0;
 +}
 +
 +static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
 +{
 +      struct unix_sock *u, *u_other;
 +      int rc;
 +
 +      u = unix_sk(sk);
 +      u_other = unix_sk(other);
 +      rc = 0;
 +      spin_lock(&u_other->peer_wait.lock);
 +
 +      if (!u->peer_wake.private) {
 +              u->peer_wake.private = other;
 +              __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
 +
 +              rc = 1;
 +      }
 +
 +      spin_unlock(&u_other->peer_wait.lock);
 +      return rc;
 +}
 +
 +static void unix_dgram_peer_wake_disconnect(struct sock *sk,
 +                                          struct sock *other)
 +{
 +      struct unix_sock *u, *u_other;
 +
 +      u = unix_sk(sk);
 +      u_other = unix_sk(other);
 +      spin_lock(&u_other->peer_wait.lock);
 +
 +      if (u->peer_wake.private == other) {
 +              __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
 +              u->peer_wake.private = NULL;
 +      }
 +
 +      spin_unlock(&u_other->peer_wait.lock);
 +}
 +
 +static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
 +                                                 struct sock *other)
 +{
 +      unix_dgram_peer_wake_disconnect(sk, other);
 +      wake_up_interruptible_poll(sk_sleep(sk),
 +                                 POLLOUT |
 +                                 POLLWRNORM |
 +                                 POLLWRBAND);
 +}
 +
 +/* preconditions:
 + *    - unix_peer(sk) == other
 + *    - association is stable
 + */
 +static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 +{
 +      int connected;
 +
 +      connected = unix_dgram_peer_wake_connect(sk, other);
 +
 +      if (unix_recvq_full(other))
 +              return 1;
 +
 +      if (connected)
 +              unix_dgram_peer_wake_disconnect(sk, other);
 +
 +      return 0;
 +}
 +
  static inline int unix_writable(struct sock *sk)
  {
        return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
@@@ -497,9 -364,7 +497,9 @@@ static void unix_sock_destructor(struc
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
 -              printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
 +#ifdef CONFIG_MTK_NET_LOGGING
 +              printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
 +#endif
                return;
        }
  
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
 -#ifdef UNIX_REFCNT_DEBUG
 -      printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
 +    #ifdef UNIX_REFCNT_DEBUG
 +      printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
                atomic_long_read(&unix_nr_socks));
 -#endif
 +    #endif
  }
  
  static void unix_release_sock(struct sock *sk, int embrion)
                        skpair->sk_state_change(skpair);
                        sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                }
 +
 +              unix_dgram_peer_wake_disconnect(sk, skpair);
                sock_put(skpair); /* It may now die */
                unix_peer(sk) = NULL;
        }
@@@ -635,7 -498,6 +635,7 @@@ out_unlock
        unix_state_unlock(sk);
        put_pid(old_pid);
  out:
 +   
        return err;
  }
  
@@@ -788,7 -650,6 +788,7 @@@ static struct sock *unix_create1(struc
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->readlock); /* single task reading lock */
        init_waitqueue_head(&u->peer_wait);
 +      init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
  out:
        if (sk == NULL)
@@@ -1035,8 -896,7 +1035,8 @@@ static int unix_bind(struct socket *soc
        atomic_set(&addr->refcnt, 1);
  
        if (sun_path[0]) {
 -              struct path path;
 +              struct path path;      
 +
                umode_t mode = S_IFSOCK |
                       (SOCK_INODE(sock)->i_mode & ~current_umask());
                err = unix_mknod(sun_path, mode, &path);
@@@ -1073,7 -933,6 +1073,7 @@@ out_unlock
  out_up:
        mutex_unlock(&u->readlock);
  out:
 + 
        return err;
  }
  
@@@ -1113,7 -972,6 +1113,7 @@@ static int unix_dgram_connect(struct so
        int err;
  
        if (addr->sa_family != AF_UNSPEC) {
 +     
                err = unix_mkname(sunaddr, alen, &hash);
                if (err < 0)
                        goto out;
@@@ -1159,8 -1017,6 +1159,8 @@@ restart
        if (unix_peer(sk)) {
                struct sock *old_peer = unix_peer(sk);
                unix_peer(sk) = other;
 +              unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
 +
                unix_state_double_unlock(sk, other);
  
                if (other != old_peer)
                unix_peer(sk) = other;
                unix_state_double_unlock(sk, other);
        }
 +      
 +#ifdef CONFIG_MTK_NET_LOGGING
 +      if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) &&
 +          (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL)) {
 +              printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]:connect [%s] other[%lu]\n",
 +                     SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
 +      }
 +#endif
 +            
        return 0;
  
  out_unlock:
        unix_state_double_unlock(sk, other);
        sock_put(other);
  out:
 +     
        return err;
  }
  
@@@ -1367,17 -1214,8 +1367,17 @@@ restart
        __skb_queue_tail(&other->sk_receive_queue, skb);
        spin_unlock(&other->sk_receive_queue.lock);
        unix_state_unlock(other);
 +      
 +#ifdef CONFIG_MTK_NET_LOGGING
 +      if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) &&
 +          (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL)) {
 +              printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu]: connect [%s] other[%lu]\n",
 +                     SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
 +      }
 +#endif
 +
        other->sk_data_ready(other, 0);
        sock_put(other);
 +       
        return 0;
  
  out_unlock:
@@@ -1390,7 -1228,6 +1390,7 @@@ out
                unix_release_sock(newsk, 0);
        if (other)
                sock_put(other);
 +    
        return err;
  }
  
@@@ -1442,7 -1279,7 +1442,7 @@@ static int unix_accept(struct socket *s
        /* If socket state is TCP_LISTEN it cannot change (for now...),
         * so that no locks are necessary.
         */
 -
 +    
        skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
        if (!skb) {
                /* This means receive shutdown. */
        unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
 +    
        return 0;
  
  out:
 +    
        return err;
  }
  
@@@ -1621,8 -1456,7 +1621,8 @@@ static int unix_dgram_sendmsg(struct ki
        struct scm_cookie tmp_scm;
        int max_level;
        int data_len = 0;
 -
 +      int sk_locked;
 +       
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
        wait_for_unix_gc();
@@@ -1698,14 -1532,12 +1698,14 @@@ restart
                goto out_free;
        }
  
 +      sk_locked = 0;
        unix_state_lock(other);
 +restart_locked:
        err = -EPERM;
        if (!unix_may_send(sk, other))
                goto out_unlock;
  
 -      if (sock_flag(other, SOCK_DEAD)) {
 +      if (unlikely(sock_flag(other, SOCK_DEAD))) {
                /*
                 *      Check with 1003.1g - what should
                 *      datagram error
                unix_state_unlock(other);
                sock_put(other);
  
 -              err = 0;
 +              if (!sk_locked)
                unix_state_lock(sk);
                if (unix_peer(sk) == other) {
                        unix_peer(sk) = NULL;
 +                      unix_dgram_peer_wake_disconnect_wakeup(sk, other);
 +
                        unix_state_unlock(sk);
  
                        unix_dgram_disconnected(sk, other);
                        goto out_unlock;
        }
  
 -      if (unix_peer(other) != sk && unix_recvq_full(other)) {
 -              if (!timeo) {
 -                      err = -EAGAIN;
 -                      goto out_unlock;
 +      /* other == sk && unix_peer(other) != sk if
 +       * - unix_peer(sk) == NULL, destination address bound to sk
 +       * - unix_peer(sk) == sk by time of get but disconnected before lock
 +       */
 +      if (other != sk &&
 +          unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
 +              if (timeo) {
 +                      timeo = unix_wait_for_peer(other, timeo);
 +
 +                      err = sock_intr_errno(timeo);
 +                      if (signal_pending(current))
 +                              goto out_free;
 +
 +                      goto restart;
                }
  
 -              timeo = unix_wait_for_peer(other, timeo);
 +              if (!sk_locked) {
 +                      unix_state_unlock(other);
 +                      unix_state_double_lock(sk, other);
 +              }
  
 -              err = sock_intr_errno(timeo);
 -              if (signal_pending(current))
 -                      goto out_free;
 +              if (unix_peer(sk) != other ||
 +                  unix_dgram_peer_wake_me(sk, other)) {
 +                      err = -EAGAIN;
 +                      sk_locked = 1;
 +                      goto out_unlock;
 +              }
  
 -              goto restart;
 +              if (!sk_locked) {
 +                      sk_locked = 1;
 +                      goto restart_locked;
 +              }
        }
  
 +      if (unlikely(sk_locked))
 +              unix_state_unlock(sk);
 +
        if (sock_flag(other, SOCK_RCVTSTAMP))
                __net_timestamp(skb);
        maybe_add_creds(skb, sock, other);
        other->sk_data_ready(other, len);
        sock_put(other);
        scm_destroy(siocb->scm);
 +    
        return len;
  
  out_unlock:
 +      if (sk_locked)
 +              unix_state_unlock(sk);
        unix_state_unlock(other);
  out_free:
        kfree_skb(skb);
@@@ -1804,7 -1609,6 +1804,7 @@@ out
        if (other)
                sock_put(other);
        scm_destroy(siocb->scm);
 +      
        return err;
  }
  
@@@ -1824,7 -1628,6 +1824,7 @@@ static int unix_stream_sendmsg(struct k
  
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
 +              
        wait_for_unix_gc();
        err = scm_send(sock, msg, siocb->scm, false);
        if (err < 0)
  
                skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
                                          &err);
 +              
  
                if (skb == NULL)
                        goto out_err;
  
                if (sock_flag(other, SOCK_DEAD) ||
                    (other->sk_shutdown & RCV_SHUTDOWN))
 +              {
 +                      if (other->sk_socket) {
 +                              if (sk->sk_socket) {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                                      printk(KERN_INFO "[mtk_net][unix]: sendmsg[%lu:%lu]:peer close\n",
 +                                             SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
 +#endif
 +                              } else {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                                      printk(KERN_INFO "[mtk_net][unix]: sendmsg[null:%lu]:peer close\n",
 +                                             SOCK_INODE(other->sk_socket)->i_ino);
 +#endif
 +                              }
 +                      } else {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                              printk(KERN_INFO "[mtk_net][unix]: sendmsg:peer close\n");
 +#endif
 +                      }
                        goto pipe_err_free;
 +              }
  
                maybe_add_creds(skb, sock, other);
                skb_queue_tail(&other->sk_receive_queue, skb);
@@@ -1954,7 -1730,6 +1954,7 @@@ pipe_err
  out_err:
        scm_destroy(siocb->scm);
        siocb->scm = NULL;
 +        
        return sent ? : err;
  }
  
@@@ -2096,7 -1871,6 +2096,7 @@@ out_free
  out_unlock:
        mutex_unlock(&u->readlock);
  out:
 +      
        return err;
  }
  
@@@ -2122,8 -1896,12 +2122,12 @@@ static long unix_stream_data_wait(struc
  
                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                unix_state_unlock(sk);
 -              timeo = schedule_timeout(timeo);
 +              timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
+               if (sock_flag(sk, SOCK_DEAD))
+                       break;
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
  
@@@ -2148,7 -1926,6 +2152,7 @@@ static int unix_stream_recvmsg(struct k
        int err = 0;
        long timeo;
        int skip;
 +      struct sock *other = unix_peer(sk);
  
        err = -EINVAL;
        if (sk->sk_state != TCP_ESTABLISHED)
                struct sk_buff *skb, *last;
  
                unix_state_lock(sk);
+               if (sock_flag(sk, SOCK_DEAD)) {
+                       err = -ECONNRESET;
+                       goto unlock;
+               }
                last = skb = skb_peek(&sk->sk_receive_queue);
  again:
                if (skb == NULL) {
                        if (err)
                                goto unlock;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
 +                      {
 +                              if (sk && sk->sk_socket) {
 +                                      if (other && other->sk_socket) {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                                              printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to peer shutdown\n",
 +                                                     SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
 +#endif
 +                                      } else {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                                              printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]:exit read due to peer shutdown\n",
 +                                                     SOCK_INODE(sk->sk_socket)->i_ino);
 +#endif
 +                                      }
 +                              } else {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                                      printk(KERN_INFO "[mtk_net][unix]: recvmsg: exit read due to peer shutdown\n");
 +#endif
 +                              }
                                goto unlock;
 -
 +                      }
                        unix_state_unlock(sk);
                        err = -EAGAIN;
                        if (!timeo)
                        mutex_unlock(&u->readlock);
  
                        timeo = unix_stream_data_wait(sk, timeo, last);
 +                      if (!timeo) {
 +                              if (sk && sk->sk_socket) {
 +                                      if (other && other->sk_socket) {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                                              printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to timeout\n",
 +                                                     SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
 +#endif
 +                                      } else {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                                              printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]:exit read due to timeout\n",
 +                                                     SOCK_INODE(sk->sk_socket)->i_ino);
 +#endif
 +                                      }
 +                              } else {
 +#ifdef CONFIG_MTK_NET_LOGGING
 +                                      printk(KERN_INFO "[mtk_net][unix]: recvmsg:exit read due to timeout\n");
 +#endif
 +                              }
 +                      }
  
                        if (signal_pending(current)
                            ||  mutex_lock_interruptible(&u->readlock)) {
        mutex_unlock(&u->readlock);
        scm_recv(sock, msg, siocb->scm, flags);
  out:
 +  
        return copied ? : err;
  }
  
@@@ -2501,29 -2240,22 +2509,29 @@@ static unsigned int unix_dgram_poll(str
                        mask |= POLLHUP;
                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)
 +              {
                        return mask;
 -      }
 +              }
 +      }
  
        /* No write status requested, avoid expensive OUT tests. */
        if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
 +      {
                return mask;
 +      }
  
        writable = unix_writable(sk);
 -      other = unix_peer_get(sk);
 -      if (other) {
 -              if (unix_peer(other) != sk) {
 -                      sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
 -                      if (unix_recvq_full(other))
 -                              writable = 0;
 -              }
 -              sock_put(other);
 +      if (writable) {
 +              unix_state_lock(sk);
 +
 +              other = unix_peer(sk);
 +              if (other && unix_peer(other) != sk &&
 +                  unix_recvq_full(other) &&
 +                  unix_dgram_peer_wake_me(sk, other))
 +                      writable = 0;
 +
 +              unix_state_unlock(sk);
        }
  
        if (writable)
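
The bulk of the upstream change in this file is the dgram peer-wake machinery described in the long comment block above: a wait_queue_t embedded in struct unix_sock (peer_wake) is hooked into the peer's peer_wait queue, and its custom wake function relays the wakeup to the client's own wait queue, then detaches so each relay is one-shot. A minimal sketch of that relay pattern using the 3.10-era wait-queue APIs (locking and lifetime handling elided; the real code holds peer_wait.lock and pins the peer):

	#include <linux/wait.h>
	#include <linux/poll.h>

	struct relay {
		wait_queue_t	  wait;		/* entry hooked into the source queue */
		wait_queue_head_t *target;	/* queue that should see the wakeup */
	};

	static int relay_wake(wait_queue_t *q, unsigned mode, int flags, void *key)
	{
		struct relay *r = container_of(q, struct relay, wait);

		/* one-shot: detach from the source before forwarding */
		list_del_init(&q->task_list);
		wake_up_interruptible_poll(r->target, POLLOUT | POLLWRNORM);
		return 0;
	}

	static void relay_arm(struct relay *r, wait_queue_head_t *source,
			      wait_queue_head_t *target)
	{
		r->target = target;
		init_waitqueue_func_entry(&r->wait, relay_wake);
		add_wait_queue(source, &r->wait);
	}

unix_dgram_sendmsg() and unix_dgram_poll() arm the real version of this (unix_dgram_peer_wake_connect()) only when the peer's receive queue is full, and tear it down again (unix_dgram_peer_wake_disconnect()) once a wakeup has been relayed or the association to the peer is dropped.
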