#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
+#include <linux/freezer.h>
+
+
+#include <linux/uio.h>
+#include <linux/blkdev.h>
+#include <linux/compat.h>
+#include <linux/rtc.h>
+#include <asm/kmap_types.h>
+#include <linux/device.h>
+
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
+
+//for aee interface start
+#define __UNIX_SOCKET_OUTPUT_BUF_SIZE__ 3500
+static struct proc_dir_entry *gunix_socket_track_aee_entry = NULL;
+#define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
+#define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
+
+static volatile unsigned int unix_sock_track_stop_flag = 0;
+#define unix_peer(sk) (unix_sk(sk)->peer)
+
+
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
return hash&(UNIX_HASH_SIZE-1);
}
-#define unix_peer(sk) (unix_sk(sk)->peer)
+
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
+ #endif
return;
}
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
-#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+ #ifdef UNIX_REFCNT_DEBUG
+ printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
-#endif
+ #endif
}
static void unix_release_sock(struct sock *sk, int embrion)
unix_state_unlock(sk);
put_pid(old_pid);
out:
+
return err;
}
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- struct path path;
+ struct path path;
+
umode_t mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
err = unix_mknod(sun_path, mode, &path);
out_up:
mutex_unlock(&u->readlock);
out:
+
return err;
}
int err;
if (addr->sa_family != AF_UNSPEC) {
+
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
goto out;
unix_peer(sk) = other;
unix_state_double_unlock(sk, other);
}
+
+#ifdef CONFIG_MTK_NET_LOGGING
+ if((SOCK_INODE(sock)!= NULL) && (sunaddr != NULL) && (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL))
+ {
+ printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]:connect [%s] other[%lu]\n",SOCK_INODE(sock)->i_ino,sunaddr->sun_path,SOCK_INODE(other->sk_socket)->i_ino);
+ }
+#endif
+
return 0;
out_unlock:
unix_state_double_unlock(sk, other);
sock_put(other);
out:
+
return err;
}
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
+
+ #ifdef CONFIG_MTK_NET_LOGGING
+ if((SOCK_INODE(sock)!= NULL) && (sunaddr != NULL) && (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL))
+ {
+ printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu ]: connect [%s] other[%lu] \n",SOCK_INODE(sock)->i_ino,sunaddr->sun_path,SOCK_INODE(other->sk_socket)->i_ino);
+ }
+ #endif
+
other->sk_data_ready(other, 0);
sock_put(other);
+
return 0;
out_unlock:
unix_release_sock(newsk, 0);
if (other)
sock_put(other);
+
return err;
}
/* If socket state is TCP_LISTEN it cannot change (for now...),
* so that no locks are necessary.
*/
-
+
skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
if (!skb) {
/* This means receive shutdown. */
unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
+
return 0;
out:
+
return err;
}
int max_level;
int data_len = 0;
int sk_locked;
+
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
sock_put(other);
if (!sk_locked)
	unix_state_lock(sk);
	err = 0;
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
goto out_unlock;
}
-	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+	/* other == sk && unix_peer(other) != sk if
+	 * - unix_peer(sk) == NULL, destination address bound to sk
+	 * - unix_peer(sk) == sk by time of get but disconnected before lock
+	 */
+	if (other != sk &&
+	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
+
return len;
out_unlock:
if (other)
sock_put(other);
scm_destroy(siocb->scm);
+
return err;
}
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
+
wait_for_unix_gc();
err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
&err);
+
if (skb == NULL)
goto out_err;
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
+ {
+ if( other->sk_socket )
+ {
+ if(sk->sk_socket)
+ {
+
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: sendmsg[%lu:%lu]:peer close\n" ,SOCK_INODE(sk->sk_socket)->i_ino,SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }
+ else{
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: sendmsg[null:%lu]:peer close\n" ,SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }
+
+ }
+ else
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: sendmsg:peer close \n" );
+ #endif
+ }
+
+
goto pipe_err_free;
+ }
maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
out_err:
scm_destroy(siocb->scm);
siocb->scm = NULL;
+
return sent ? : err;
}
out_unlock:
mutex_unlock(&u->readlock);
out:
+
return err;
}
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
unix_state_unlock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
int err = 0;
long timeo;
int skip;
+ struct sock * other = unix_peer(sk);
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
if (err)
goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
+ {
+ if(sk && sk->sk_socket )
+ {
+ if(other && other->sk_socket ){
+ #ifdef CONFIG_MTK_NET_LOGGING
+
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to peer shutdown \n" ,SOCK_INODE(sk->sk_socket)->i_ino,SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }else{
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]:exit read due to peer shutdown \n" ,SOCK_INODE(sk->sk_socket)->i_ino);
+ #endif
+ }
+ }
+ else{
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg: exit read due to peer shutdown \n" );
+ #endif
+ }
goto unlock;
-
+ }
unix_state_unlock(sk);
err = -EAGAIN;
if (!timeo)
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo, last);
+ if (!timeo)
+ {
+ if(sk && sk->sk_socket )
+ {
+ if(other && other->sk_socket ){
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to timeout \n" ,SOCK_INODE(sk->sk_socket)->i_ino,SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }else{
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:null]:exit read due to timeout \n" ,SOCK_INODE(sk->sk_socket)->i_ino);
+ #endif
+ }
+ }
+ else
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg:exit read due to timeout \n" );
+ #endif
+ }
+
+ }
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
mutex_unlock(&u->readlock);
scm_recv(sock, msg, siocb->scm, flags);
out:
+
return copied ? : err;
}
mask |= POLLHUP;
/* connection hasn't started yet? */
if (sk->sk_state == TCP_SYN_SENT)
+ {
+
return mask;
- }
+ }
+ }
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
+ {
return mask;
+ }
writable = unix_writable(sk);
if (writable) {