#include <linux/kobj_map.h>
#include <linux/mutex.h>
#include <linux/idr.h>
+#include <linux/ctype.h>
+#include <linux/fs_uuid.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
/* allocate ext devt */
idr_preload(GFP_KERNEL);
- spin_lock(&ext_devt_lock);
+ spin_lock_bh(&ext_devt_lock);
idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
- spin_unlock(&ext_devt_lock);
+ spin_unlock_bh(&ext_devt_lock);
idr_preload_end();
if (idx < 0)
return;
if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
- spin_lock(&ext_devt_lock);
+ spin_lock_bh(&ext_devt_lock);
idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
- spin_unlock(&ext_devt_lock);
+ spin_unlock_bh(&ext_devt_lock);
}
}
} else {
struct hd_struct *part;
- spin_lock(&ext_devt_lock);
+ spin_lock_bh(&ext_devt_lock);
part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
if (part && get_disk(part_to_disk(part))) {
*partno = part->partno;
disk = part_to_disk(part);
}
- spin_unlock(&ext_devt_lock);
+ spin_unlock_bh(&ext_devt_lock);
}
return disk;
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
+ seqf->private = NULL;
}
}
blk_put_queue(disk->queue);
kfree(disk);
}
+
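+/*
+ * Report the partition count in disk uevents so userspace (e.g. udev)
+ * receives NPARTS= with each add/change event for the disk.
+ */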
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int cnt = 0;
+
+ disk_part_iter_init(&piter, disk, 0);
+ while ((part = disk_part_iter_next(&piter)))
+ cnt++;
+ disk_part_iter_exit(&piter);
+ add_uevent_var(env, "NPARTS=%u", cnt);
+ return 0;
+}
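+/* udev rules can then match the count via ENV{NPARTS}, e.g.
+ * ENV{NPARTS}=="0" for an unpartitioned disk */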
+
struct class block_class = {
.name = "block",
};
.groups = disk_attr_groups,
.release = disk_release,
.devnode = block_devnode,
+ .uevent = disk_uevent,
};
#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(invalidate_partition);
+dev_t blk_lookup_fs_info(struct fs_info *seek)
+{
+ dev_t devt = MKDEV(0, 0);
+ struct class_dev_iter iter;
+ struct device *dev;
+ int best_score = 0;
+
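+ /* part_matches_fs_info() scores a partition from 0 to 3 (assumed
+ * best possible match), so stop scanning once 3 is reached */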
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while (best_score < 3 && (dev = class_dev_iter_next(&iter))) {
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+
+ while (best_score < 3 && (part = disk_part_iter_next(&piter))) {
+ int score = part_matches_fs_info(part, seek);
+ if (score > best_score) {
+ devt = part_devt(part);
+ best_score = score;
+ }
+ }
+ disk_part_iter_exit(&piter);
+ }
+ class_dev_iter_exit(&iter);
+ return devt;
+}
+EXPORT_SYMBOL_GPL(blk_lookup_fs_info);
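+/*
+ * Usage sketch (illustrative, not part of this patch): given a struct
+ * fs_info already populated by the fs_uuid code (uuid + last-mount data),
+ * map it back to a device number:
+ *
+ *	dev_t devt = blk_lookup_fs_info(seek);
+ *	if (devt == MKDEV(0, 0))
+ *		return -ENODEV;		// no partition matched
+ */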
+
+/*
+ * Iterate over block devices matching @key.  The caller passes NULL as
+ * @last to start.  Each match is returned with blkdev_get() already done
+ * on it, and the bdev passed in as @last gets its blkdev_put() done here.
+ * When no more matches are found, NULL is returned.
+ */
+struct block_device *next_bdev_of_type(struct block_device *last,
+ const char *key)
+{
+ struct class_dev_iter iter;
+ struct device *dev;
+ struct block_device *next = NULL, *bdev;
+ int got_last = 0;
+
+ if (!key)
+ goto out;
+
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while (!next && (dev = class_dev_iter_next(&iter))) {
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+
+ while ((part = disk_part_iter_next(&piter))) {
+ bdev = bdget(part_devt(part));
+ if (!bdev)
+ continue;
+ if (last && !got_last) {
+ if (last == bdev)
+ got_last = 1;
+ bdput(bdev); /* drop the reference bdget() took */
+ continue;
+ }
+
+ if (blkdev_get(bdev, FMODE_READ, 0))
+ continue;
+
+ if (bdev_matches_key(bdev, key)) {
+ next = bdev;
+ break;
+ }
+
+ blkdev_put(bdev, FMODE_READ);
+ }
+ disk_part_iter_exit(&piter);
+ }
+ class_dev_iter_exit(&iter);
+out:
+ if (last)
+ blkdev_put(last, FMODE_READ);
+ return next;
+}
+EXPORT_SYMBOL_GPL(next_bdev_of_type);
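+/*
+ * Iteration sketch (illustrative, not part of this patch).  The helper
+ * hides the blkdev_get()/blkdev_put() bookkeeping described above, so a
+ * scan for a given key looks like:
+ *
+ *	struct block_device *bdev = NULL;
+ *
+ *	while ((bdev = next_bdev_of_type(bdev, key))) {
+ *		if (this_is_the_one(bdev))	// hypothetical predicate
+ *			break;			// keep the open reference
+ *	}
+ */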
+
/*
* Disk events - monitor disk events like media change and eject request.
*/
static const char *disk_events_strs[] = {
[ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change",
[ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request",
+#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
+ [ilog2(DISK_EVENT_MEDIA_DISAPPEAR)] = "media_disappear",
+#endif
};
static char *disk_uevents[] = {
[ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1",
[ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1",
+#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
+ [ilog2(DISK_EVENT_MEDIA_DISAPPEAR)] = "DISK_EVENT_MEDIA_DISAPPEAR=1",
+#endif
};
/* list of all disk_events */
static LIST_HEAD(disk_events);
-/* disable in-kernel polling by default */
-static unsigned long disk_events_dfl_poll_msecs = 0;
+/* ALPS00319570 (CL955952): poll every 2 seconds by default */
+static unsigned long disk_events_dfl_poll_msecs = 2000;
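+/* the default can still be overridden at boot or runtime via the
+ * block.events_dfl_poll_msecs module parameter */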
static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
{
}
EXPORT_SYMBOL(__ip_select_ident);
-static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void __build_flow_key(struct flowi4 *fl4, struct sock *sk,
const struct iphdr *iph,
int oif, u8 tos,
u8 prot, u32 mark, int flow_flags)
flowi4_init_output(fl4, oif, mark, tos,
RT_SCOPE_UNIVERSE, prot,
flow_flags,
- iph->daddr, iph->saddr, 0, 0);
+ iph->daddr, iph->saddr, 0, 0,
+ sk ? sock_i_uid(sk) : GLOBAL_ROOT_UID);
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
- const struct sock *sk)
+ struct sock *sk)
{
const struct iphdr *iph = ip_hdr(skb);
int oif = skb->dev->ifindex;
__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
}
-static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
- daddr, inet->inet_saddr, 0, 0);
+ daddr, inet->inet_saddr, 0, 0,
+ sock_i_uid(sk));
rcu_read_unlock();
}
-static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
const struct sk_buff *skb)
{
if (skb)
bool send;
int code;
+ /* IP on this device is disabled. */
+ if (!in_dev)
+ goto out;
+
net = dev_net(rt->dst.dev);
if (!IN_DEV_FORWARD(in_dev)) {
switch (rt->dst.error) {
struct flowi4 fl4;
struct rtable *rt;
+ if (!mark)
+ mark = IP4_REPLY_MARK(net, skb->mark);
+
__build_flow_key(&fl4, NULL, iph, oif,
RT_TOS(iph->tos), protocol, mark, flow_flags);
rt = __ip_route_output_key(net, &fl4);
struct rtable *rt;
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+ if (!fl4.flowi4_mark)
+ fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
+
rt = __ip_route_output_key(sock_net(sk), &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
goto nla_put_failure;
+ if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
+ nla_put_u32(skb, RTA_UID,
+ from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
+ goto nla_put_failure;
+
error = rt->dst.error;
if (rt_is_input_route(rt)) {
int err;
int mark;
struct sk_buff *skb;
+ kuid_t uid;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
if (err < 0)
dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+ if (tb[RTA_UID])
+ uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
+ else
+ uid = (iif ? INVALID_UID : current_uid());
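+ /* iif lookups have no originating socket: the uid stays invalid
+ * there, while locally generated lookups default to the caller */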
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = dst;
fl4.flowi4_tos = rtm->rtm_tos;
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
+ fl4.flowi4_uid = uid;
if (iif) {
struct net_device *dev;
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
+#include <linux/freezer.h>
+#include <linux/uio.h>
+#include <linux/blkdev.h>
+#include <linux/compat.h>
+#include <linux/rtc.h>
+#include <asm/kmap_types.h>
+#include <linux/device.h>
+
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
+
+/* for AEE interface */
+#define __UNIX_SOCKET_OUTPUT_BUF_SIZE__ 3500
+static struct proc_dir_entry *gunix_socket_track_aee_entry;
+#define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
+#define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
+
+static volatile unsigned int unix_sock_track_stop_flag;
+#define unix_peer(sk) (unix_sk(sk)->peer)
+
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
return hash&(UNIX_HASH_SIZE-1);
}
-#define unix_peer(sk) (unix_sk(sk)->peer)
+
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
return s;
}
+/* Support code for asymmetrically connected dgram sockets
+ *
+ * If a datagram socket is connected to a socket not itself connected
+ * to the first socket (e.g., /dev/log), clients may only enqueue more
+ * messages if the present receive queue of the server socket is not
+ * "too large". This means there's a second writeability condition
+ * poll and sendmsg need to test. The dgram recv code will do a wake
+ * up on the peer_wait wait queue of a socket upon reception of a
+ * datagram which needs to be propagated to sleeping would-be writers
+ * since these might not have sent anything so far. This can't be
+ * accomplished via poll_wait because the lifetime of the server
+ * socket might be less than that of its clients if these break their
+ * association with it or if the server socket is closed while clients
+ * are still connected to it and there's no way to inform "a polling
+ * implementation" that it should let go of a certain wait queue
+ *
+ * In order to propagate a wake up, a wait_queue_t of the client
+ * socket is enqueued on the peer_wait queue of the server socket
+ * whose wake function does a wake_up on the ordinary client socket
+ * wait queue. This connection is established whenever a write (or
+ * poll for write) hits the flow-control condition and is broken when
+ * the association to the server socket is dissolved or after a wake-up
+ * has been relayed.
+ */
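+/*
+ * Illustrative picture of the relay:
+ *
+ *	client sk ---connected to---> server sk (e.g. /dev/log)
+ *
+ *	1. a client write hits flow control: its peer_wake entry is put on
+ *	   the server's peer_wait queue (unix_dgram_peer_wake_connect).
+ *	2. the server receives a datagram and wakes peer_wait; the entry's
+ *	   wake function, unix_dgram_peer_wake_relay(), forwards the wake
+ *	   to sk_sleep(client) and the entry is removed again.
+ */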
+
+static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+ void *key)
+{
+ struct unix_sock *u;
+ wait_queue_head_t *u_sleep;
+
+ u = container_of(q, struct unix_sock, peer_wake);
+
+ __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
+ q);
+ u->peer_wake.private = NULL;
+
+ /* relaying can only happen while the wq still exists */
+ u_sleep = sk_sleep(&u->sk);
+ if (u_sleep)
+ wake_up_interruptible_poll(u_sleep, key);
+
+ return 0;
+}
+
+static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
+{
+ struct unix_sock *u, *u_other;
+ int rc;
+
+ u = unix_sk(sk);
+ u_other = unix_sk(other);
+ rc = 0;
+ spin_lock(&u_other->peer_wait.lock);
+
+ if (!u->peer_wake.private) {
+ u->peer_wake.private = other;
+ __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
+
+ rc = 1;
+ }
+
+ spin_unlock(&u_other->peer_wait.lock);
+ return rc;
+}
+
+static void unix_dgram_peer_wake_disconnect(struct sock *sk,
+ struct sock *other)
+{
+ struct unix_sock *u, *u_other;
+
+ u = unix_sk(sk);
+ u_other = unix_sk(other);
+ spin_lock(&u_other->peer_wait.lock);
+
+ if (u->peer_wake.private == other) {
+ __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
+ u->peer_wake.private = NULL;
+ }
+
+ spin_unlock(&u_other->peer_wait.lock);
+}
+
+static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
+ struct sock *other)
+{
+ unix_dgram_peer_wake_disconnect(sk, other);
+ wake_up_interruptible_poll(sk_sleep(sk),
+ POLLOUT |
+ POLLWRNORM |
+ POLLWRBAND);
+}
+
+/* preconditions:
+ * - unix_peer(sk) == other
+ * - association is stable
+ *
+ * Returns 1 if sk must wait (other's receive queue is full and a wake-up
+ * relay has been installed), 0 if the write may proceed.
+ */
+static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+{
+ int connected;
+
+ connected = unix_dgram_peer_wake_connect(sk, other);
+
+ if (unix_recvq_full(other))
+ return 1;
+
+ if (connected)
+ unix_dgram_peer_wake_disconnect(sk, other);
+
+ return 0;
+}
+
static inline int unix_writable(struct sock *sk)
{
return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
+ #endif
return;
}
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+ printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
#endif
}
static void unix_release_sock(struct sock *sk, int embrion)
skpair->sk_state_change(skpair);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
}
+
+ unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
}
unix_state_unlock(sk);
put_pid(old_pid);
out:
return err;
}
INIT_LIST_HEAD(&u->link);
mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
+ init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
if (sk == NULL)
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
struct path path;
umode_t mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
err = unix_mknod(sun_path, mode, &path);
out_up:
mutex_unlock(&u->readlock);
out:
return err;
}
int err;
if (addr->sa_family != AF_UNSPEC) {
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
goto out;
if (unix_peer(sk)) {
struct sock *old_peer = unix_peer(sk);
unix_peer(sk) = other;
+ unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+
unix_state_double_unlock(sk, other);
if (other != old_peer)
unix_peer(sk) = other;
unix_state_double_unlock(sk, other);
}
+
+#ifdef CONFIG_MTK_NET_LOGGING
+ if (SOCK_INODE(sock) && sunaddr && other->sk_socket &&
+ SOCK_INODE(other->sk_socket))
+ printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]:connect [%s] other[%lu]\n",
+ SOCK_INODE(sock)->i_ino, sunaddr->sun_path,
+ SOCK_INODE(other->sk_socket)->i_ino);
+#endif
+
return 0;
out_unlock:
unix_state_double_unlock(sk, other);
sock_put(other);
out:
return err;
}
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
+
+#ifdef CONFIG_MTK_NET_LOGGING
+ if (SOCK_INODE(sock) && sunaddr && other->sk_socket &&
+ SOCK_INODE(other->sk_socket))
+ printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu]:connect [%s] other[%lu]\n",
+ SOCK_INODE(sock)->i_ino, sunaddr->sun_path,
+ SOCK_INODE(other->sk_socket)->i_ino);
+#endif
+
other->sk_data_ready(other, 0);
sock_put(other);
return 0;
out_unlock:
unix_release_sock(newsk, 0);
if (other)
sock_put(other);
return err;
}
/* If socket state is TCP_LISTEN it cannot change (for now...),
* so that no locks are necessary.
*/
skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
if (!skb) {
/* This means receive shutdown. */
unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
return 0;
out:
return err;
}
struct scm_cookie tmp_scm;
int max_level;
int data_len = 0;
-
+ int sk_locked;
+
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
goto out_free;
}
+ sk_locked = 0;
unix_state_lock(other);
+restart_locked:
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
- if (sock_flag(other, SOCK_DEAD)) {
+ if (unlikely(sock_flag(other, SOCK_DEAD))) {
/*
* Check with 1003.1g - what should
* datagram error
unix_state_unlock(other);
sock_put(other);
- err = 0;
+ if (!sk_locked)
unix_state_lock(sk);
+
+ err = 0;
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
+ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
goto out_unlock;
}
- if (unix_peer(other) != sk && unix_recvq_full(other)) {
- if (!timeo) {
- err = -EAGAIN;
- goto out_unlock;
+ /* other == sk && unix_peer(other) != sk if
+ * - unix_peer(sk) == NULL, destination address bound to sk
+ * - unix_peer(sk) == sk by time of get but disconnected before lock
+ */
+ if (other != sk &&
+ unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ if (timeo) {
+ timeo = unix_wait_for_peer(other, timeo);
+
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ goto out_free;
+
+ goto restart;
}
- timeo = unix_wait_for_peer(other, timeo);
+ if (!sk_locked) {
+ unix_state_unlock(other);
+ unix_state_double_lock(sk, other);
+ }
- err = sock_intr_errno(timeo);
- if (signal_pending(current))
- goto out_free;
+ if (unix_peer(sk) != other ||
+ unix_dgram_peer_wake_me(sk, other)) {
+ err = -EAGAIN;
+ sk_locked = 1;
+ goto out_unlock;
+ }
- goto restart;
+ if (!sk_locked) {
+ sk_locked = 1;
+ goto restart_locked;
+ }
}
+ if (unlikely(sk_locked))
+ unix_state_unlock(sk);
+
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
return len;
out_unlock:
+ if (sk_locked)
+ unix_state_unlock(sk);
unix_state_unlock(other);
out_free:
kfree_skb(skb);
if (other)
sock_put(other);
scm_destroy(siocb->scm);
return err;
}
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
&err);
if (skb == NULL)
goto out_err;
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
+ {
+#ifdef CONFIG_MTK_NET_LOGGING
+ if (other->sk_socket) {
+ if (sk->sk_socket)
+ printk(KERN_INFO "[mtk_net][unix]: sendmsg[%lu:%lu]:peer close\n",
+ SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
+ else
+ printk(KERN_INFO "[mtk_net][unix]: sendmsg[null:%lu]:peer close\n",
+ SOCK_INODE(other->sk_socket)->i_ino);
+ } else {
+ printk(KERN_INFO "[mtk_net][unix]: sendmsg:peer close\n");
+ }
+#endif
goto pipe_err_free;
+ }
maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
out_err:
scm_destroy(siocb->scm);
siocb->scm = NULL;
return sent ? : err;
}
out_unlock:
mutex_unlock(&u->readlock);
out:
return err;
}
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
unix_state_unlock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
+
+ if (sock_flag(sk, SOCK_DEAD))
+ break;
+
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
int err = 0;
long timeo;
int skip;
+ struct sock *other = unix_peer(sk);
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
struct sk_buff *skb, *last;
unix_state_lock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ err = -ECONNRESET;
+ goto unlock;
+ }
last = skb = skb_peek(&sk->sk_receive_queue);
again:
if (skb == NULL) {
if (err)
goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
+ {
+#ifdef CONFIG_MTK_NET_LOGGING
+ if (sk->sk_socket) {
+ if (other && other->sk_socket)
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to peer shutdown\n",
+ SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
+ else
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]:exit read due to peer shutdown\n",
+ SOCK_INODE(sk->sk_socket)->i_ino);
+ } else {
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg: exit read due to peer shutdown\n");
+ }
+#endif
goto unlock;
-
+ }
unix_state_unlock(sk);
err = -EAGAIN;
if (!timeo)
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo, last);
+#ifdef CONFIG_MTK_NET_LOGGING
+ if (!timeo) {
+ if (sk->sk_socket) {
+ if (other && other->sk_socket)
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to timeout\n",
+ SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
+ else
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]:exit read due to timeout\n",
+ SOCK_INODE(sk->sk_socket)->i_ino);
+ } else {
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg:exit read due to timeout\n");
+ }
+ }
+#endif
if (signal_pending(current)
|| mutex_lock_interruptible(&u->readlock)) {
mutex_unlock(&u->readlock);
scm_recv(sock, msg, siocb->scm, flags);
out:
return copied ? : err;
}
mask |= POLLHUP;
/* connection hasn't started yet? */
if (sk->sk_state == TCP_SYN_SENT)
return mask;
}
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
return mask;
writable = unix_writable(sk);
- other = unix_peer_get(sk);
- if (other) {
- if (unix_peer(other) != sk) {
- sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
- if (unix_recvq_full(other))
- writable = 0;
- }
- sock_put(other);
+ if (writable) {
+ unix_state_lock(sk);
+
+ other = unix_peer(sk);
+ if (other && unix_peer(other) != sk &&
+ unix_recvq_full(other) &&
+ unix_dgram_peer_wake_me(sk, other))
+ writable = 0;
+
+ unix_state_unlock(sk);
}
if (writable)