static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
- return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
+ return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
static inline int unix_recvq_full(struct sock const *sk)
* - if started by zero, it is abstract name.
*/
-static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
+static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
if (len <= sizeof(short) || len > sizeof(*sunaddr))
return -EINVAL;
return len;
}
- *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
+ *hashp = unix_hash_fold(csum_partial((char *)sunaddr, len, 0));
return len;
}
if (!net_eq(sock_net(s), net))
continue;
- if(dentry && dentry->d_inode == i)
- {
+ if (dentry && dentry->d_inode == i) {
sock_hold(s);
goto found;
}
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk("Attempt to release alive unix socket: %p\n", sk);
+ printk(KERN_DEBUG "Attempt to release alive unix socket: %p\n", sk);
return;
}
atomic_dec(&unix_nr_socks);
#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
+ printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
+ atomic_read(&unix_nr_socks));
#endif
}
-static int unix_release_sock (struct sock *sk, int embrion)
+static int unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
struct dentry *dentry;
struct unix_sock *u = unix_sk(sk);
err = -EOPNOTSUPP;
- if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
- goto out; /* Only stream/seqpacket sockets accept */
+ if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
+ goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
if (!u->addr)
- goto out; /* No listens on an unbound socket */
+ goto out; /* No listens on an unbound socket */
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
goto out_unlock;
*/
static struct lock_class_key af_unix_sk_receive_queue_lock_key;
-static struct sock * unix_create1(struct net *net, struct socket *sock)
+static struct sock *unix_create1(struct net *net, struct socket *sock)
{
struct sock *sk = NULL;
struct unix_sock *u;
if (!sk)
goto out;
- sock_init_data(sock,sk);
+ sock_init_data(sock, sk);
lockdep_set_class(&sk->sk_receive_queue.lock,
&af_unix_sk_receive_queue_lock_key);
sock->sk = NULL;
- return unix_release_sock (sk, 0);
+ return unix_release_sock(sk, 0);
}
static int unix_autobind(struct socket *sock)
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
static u32 ordernum = 1;
- struct unix_address * addr;
+ struct unix_address *addr;
int err;
mutex_lock(&u->readlock);
retry:
addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
- addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
+ addr->hash = unix_hash_fold(csum_partial((void *)addr->name, addr->len, 0));
spin_lock(&unix_table_lock);
ordernum = (ordernum+1)&0xFFFFF;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
- struct dentry * dentry = NULL;
+ struct dentry *dentry = NULL;
struct nameidata nd;
int err;
unsigned hash;
int err;
err = -EOPNOTSUPP;
- if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
+ if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out;
err = -EINVAL;
skb->destructor = sock_wfree;
UNIXCB(skb).fp = NULL;
- for (i=scm->fp->count-1; i>=0; i--)
+ for (i = scm->fp->count-1; i >= 0; i--)
unix_notinflight(scm->fp->fp[i]);
}
if (!UNIXCB(skb).fp)
return -ENOMEM;
- for (i=scm->fp->count-1; i>=0; i--)
+ for (i = scm->fp->count-1; i >= 0; i--)
unix_inflight(scm->fp->fp[i]);
skb->destructor = unix_destruct_fds;
return 0;
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
- err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+ err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (err)
goto out_free;
struct sock *sk = sock->sk;
struct sock *other = NULL;
struct sockaddr_un *sunaddr = msg->msg_name;
- int err,size;
+ int err, size;
struct sk_buff *skb;
int sent = 0;
struct scm_cookie tmp_scm;
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto pipe_err;
- while(sent < len)
- {
+ while (sent < len) {
/*
* Optimisation for the fact that under 0.01% of X
* messages typically need breaking up.
* Grab a buffer
*/
- skb = sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
+ skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
+ &err);
if (skb == NULL)
goto out_err;
}
}
- if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
+ err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ if (err) {
kfree_skb(skb);
goto out_err;
}
unix_state_unlock(other);
kfree_skb(skb);
pipe_err:
- if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
- send_sig(SIGPIPE,current,0);
+ if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
err = -EPIPE;
out_err:
scm_destroy(siocb->scm);
siocb->scm->creds = *UNIXCREDS(skb);
unix_set_secdata(siocb->scm, skb);
- if (!(flags & MSG_PEEK))
- {
+ if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
- }
- else
- {
+ } else {
/* It is questionable: on PEEK we could:
- do not return fds - good, but too simple 8)
- return fds, and do not return them on read (old strategy,
scm_recv(sock, msg, siocb->scm, flags);
out_free:
- skb_free_datagram(sk,skb);
+ skb_free_datagram(sk, skb);
out_unlock:
mutex_unlock(&u->readlock);
out:
* Sleep until data has arrive. But check for races..
*/
-static long unix_stream_data_wait(struct sock * sk, long timeo)
+static long unix_stream_data_wait(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
mutex_lock(&u->readlock);
- do
- {
+ do {
int chunk;
struct sk_buff *skb;
unix_state_lock(sk);
skb = skb_dequeue(&sk->sk_receive_queue);
- if (skb == NULL)
- {
+ if (skb == NULL) {
if (copied >= target)
goto unlock;
* POSIX 1003.1g mandates this order.
*/
- if ((err = sock_error(sk)) != 0)
+ err = sock_error(sk);
+ if (err)
goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
goto unlock;
if (check_creds) {
/* Never glue messages from different writers */
- if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
+ if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
+ sizeof(siocb->scm->creds)) != 0) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
}
/* Copy address just once */
- if (sunaddr)
- {
+ if (sunaddr) {
unix_copy_addr(msg, skb->sk);
sunaddr = NULL;
}
size -= chunk;
/* Mark read part of skb as used */
- if (!(flags & MSG_PEEK))
- {
+ if (!(flags & MSG_PEEK)) {
skb_pull(skb, chunk);
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
/* put the skb back if we didn't use it up.. */
- if (skb->len)
- {
+ if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
if (siocb->scm->fp)
break;
- }
- else
- {
+ } else {
/* It is questionable, see note in unix_dgram_recvmsg.
*/
if (UNIXCB(skb).fp)
long amount = 0;
int err;
- switch(cmd)
- {
- case SIOCOUTQ:
- amount = atomic_read(&sk->sk_wmem_alloc);
- err = put_user(amount, (int __user *)arg);
- break;
- case SIOCINQ:
+ switch (cmd) {
+ case SIOCOUTQ:
+ amount = atomic_read(&sk->sk_wmem_alloc);
+ err = put_user(amount, (int __user *)arg);
+ break;
+ case SIOCINQ:
{
struct sk_buff *skb;
break;
}
- default:
- err = -ENOIOCTLCMD;
- break;
+ default:
+ err = -ENOIOCTLCMD;
+ break;
}
return err;
}
-static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
+static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask;
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
- if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
+ if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+ sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
/*