1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, Ericsson AB
5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/net.h>
40 #include <linux/socket.h>
41 #include <linux/errno.h>
42 #include <linux/mm.h>
43 #include <linux/slab.h>
44 #include <linux/poll.h>
45 #include <linux/fcntl.h>
46 #include <asm/string.h>
47 #include <asm/atomic.h>
48 #include <net/sock.h>
49
50 #include <linux/tipc.h>
51 #include <linux/tipc_config.h>
52 #include <net/tipc/tipc_msg.h>
53 #include <net/tipc/tipc_port.h>
54
55 #include "core.h"
56
57 #define SS_LISTENING -1 /* socket is listening */
58 #define SS_READY -2 /* socket is connectionless */
59
60 #define OVERLOAD_LIMIT_BASE 5000
61 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
62
63 struct tipc_sock {
64 struct sock sk;
65 struct tipc_port *p;
66 };
67
68 #define tipc_sk(sk) ((struct tipc_sock *)(sk))
69 #define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))
70
71 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
72 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
73 static void wakeupdispatch(struct tipc_port *tport);
74
75 static const struct proto_ops packet_ops;
76 static const struct proto_ops stream_ops;
77 static const struct proto_ops msg_ops;
78
79 static struct proto tipc_proto;
80
81 static int sockets_enabled = 0;
82
83 static atomic_t tipc_queue_size = ATOMIC_INIT(0);
84
85 /*
86 * Revised TIPC socket locking policy:
87 *
88 * Most socket operations take the standard socket lock when they start
89 * and hold it until they finish (or until they need to sleep). Acquiring
90 * this lock grants the owner exclusive access to the fields of the socket
91 * data structures, with the exception of the backlog queue. A few socket
92 * operations can be done without taking the socket lock because they only
93 * read socket information that never changes during the life of the socket.
94 *
95 * Socket operations may acquire the lock for the associated TIPC port if they
96 * need to perform an operation on the port. If any routine needs to acquire
97 * both the socket lock and the port lock it must take the socket lock first
98 * to avoid the risk of deadlock.
99 *
100 * The dispatcher handling incoming messages cannot grab the socket lock in
101 * the standard fashion, since it is invoked at the BH level and cannot block.
102 * Instead, it checks to see if the socket lock is currently owned by someone,
103 * and either handles the message itself or adds it to the socket's backlog
104 * queue; in the latter case the queued message is processed once the process
105 * owning the socket lock releases it.
106 *
107 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
108 * the problem of a blocked socket operation preventing any other operations
109 * from occurring. However, applications must be careful if they have
110 * multiple threads trying to send (or receive) on the same socket, as these
111 * operations might interfere with each other. For example, doing a connect
112 * and a receive at the same time might allow the receive to consume the
113 * ACK message meant for the connect. While additional work could be done
114 * to try to overcome this, it doesn't seem worthwhile at present.
115 *
116 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
117 * that another operation that must be performed in a non-blocking manner is
118 * not delayed for very long because the lock has already been taken.
119 *
120 * NOTE: This code assumes that certain fields of a port/socket pair are
121 * constant over its lifetime; such fields can be examined without taking
122 * the socket lock and/or port lock, and do not need to be re-read even
123 * after resuming processing after waiting. These fields include:
124 * - socket type
125 * - pointer to socket sk structure (aka tipc_sock structure)
126 * - pointer to port structure
127 * - port reference
128 */
129
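/*
 * Illustrative sketch (not part of the implementation): the lock ordering
 * described above for a routine that needs both the socket lock and the
 * port lock.  The helper name is hypothetical; the port lock is the
 * spinlock referenced by the port's "lock" pointer, as in tipc_create().
 *
 *	static void example_lock_both(struct sock *sk)
 *	{
 *		struct tipc_port *tport = tipc_sk_port(sk);
 *
 *		lock_sock(sk);			(socket lock first; may sleep)
 *		spin_lock_bh(tport->lock);	(port lock second, never first)
 *
 *		... examine or update socket and port state ...
 *
 *		spin_unlock_bh(tport->lock);
 *		release_sock(sk);
 *	}
 */
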
130 /**
131 * advance_rx_queue - discard first buffer in socket receive queue
132 *
133 * Caller must hold socket lock
134 */
135
136 static void advance_rx_queue(struct sock *sk)
137 {
138 buf_discard(__skb_dequeue(&sk->sk_receive_queue));
139 atomic_dec(&tipc_queue_size);
140 }
141
142 /**
143 * discard_rx_queue - discard all buffers in socket receive queue
144 *
145 * Caller must hold socket lock
146 */
147
148 static void discard_rx_queue(struct sock *sk)
149 {
150 struct sk_buff *buf;
151
152 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
153 atomic_dec(&tipc_queue_size);
154 buf_discard(buf);
155 }
156 }
157
158 /**
159 * reject_rx_queue - reject all buffers in socket receive queue
160 *
161 * Caller must hold socket lock
162 */
163
164 static void reject_rx_queue(struct sock *sk)
165 {
166 struct sk_buff *buf;
167
168 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
169 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
170 atomic_dec(&tipc_queue_size);
171 }
172 }
173
174 /**
175 * tipc_create - create a TIPC socket
176 * @net: network namespace (must be default network)
177 * @sock: pre-allocated socket structure
178 * @protocol: protocol indicator (must be 0)
179 *
180 * This routine creates additional data structures used by the TIPC socket,
181 * initializes them, and links them together.
182 *
183 * Returns 0 on success, errno otherwise
184 */
185
186 static int tipc_create(struct net *net, struct socket *sock, int protocol)
187 {
188 const struct proto_ops *ops;
189 socket_state state;
190 struct sock *sk;
191 struct tipc_port *tp_ptr;
192 u32 portref;
193
194 /* Validate arguments */
195
196 if (net != &init_net)
197 return -EAFNOSUPPORT;
198
199 if (unlikely(protocol != 0))
200 return -EPROTONOSUPPORT;
201
202 switch (sock->type) {
203 case SOCK_STREAM:
204 ops = &stream_ops;
205 state = SS_UNCONNECTED;
206 break;
207 case SOCK_SEQPACKET:
208 ops = &packet_ops;
209 state = SS_UNCONNECTED;
210 break;
211 case SOCK_DGRAM:
212 case SOCK_RDM:
213 ops = &msg_ops;
214 state = SS_READY;
215 break;
216 default:
217 return -EPROTOTYPE;
218 }
219
220 /* Allocate socket's protocol area */
221
222 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
223 if (sk == NULL)
224 return -ENOMEM;
225
226 /* Allocate TIPC port for socket to use */
227
228 portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
229 TIPC_LOW_IMPORTANCE, &tp_ptr);
230 if (unlikely(portref == 0)) {
231 sk_free(sk);
232 return -ENOMEM;
233 }
234
235 /* Finish initializing socket data structures */
236
237 sock->ops = ops;
238 sock->state = state;
239
240 sock_init_data(sock, sk);
241 sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
242 sk->sk_backlog_rcv = backlog_rcv;
243 tipc_sk(sk)->p = tipc_get_port(portref);
244
245 spin_unlock_bh(tp_ptr->lock);
246
247 if (sock->state == SS_READY) {
248 tipc_set_portunreturnable(portref, 1);
249 if (sock->type == SOCK_DGRAM)
250 tipc_set_portunreliable(portref, 1);
251 }
252
253 atomic_inc(&tipc_user_count);
254 return 0;
255 }
256
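/*
 * Usage sketch (user-space view of tipc_create(); not kernel code).  The
 * socket type chosen by the application selects the proto_ops table and
 * initial state set up above; any non-zero protocol is rejected with
 * EPROTONOSUPPORT.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int rdm  = socket(AF_TIPC, SOCK_RDM, 0);	(msg_ops, SS_READY)
 *	int pkt  = socket(AF_TIPC, SOCK_SEQPACKET, 0);	(packet_ops, SS_UNCONNECTED)
 *	int strm = socket(AF_TIPC, SOCK_STREAM, 0);	(stream_ops, SS_UNCONNECTED)
 */
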
257 /**
258 * release - destroy a TIPC socket
259 * @sock: socket to destroy
260 *
261 * This routine cleans up any messages that are still queued on the socket.
262 * For DGRAM and RDM socket types, all queued messages are rejected.
263 * For SEQPACKET and STREAM socket types, the first message is rejected
264 * and any others are discarded. (If the first message on a STREAM socket
265 * is partially-read, it is discarded and the next one is rejected instead.)
266 *
267 * NOTE: Rejected messages are not necessarily returned to the sender! They
268 * are returned or discarded according to the "destination droppable" setting
269 * specified for the message by the sender.
270 *
271 * Returns 0 on success, errno otherwise
272 */
273
274 static int release(struct socket *sock)
275 {
276 struct sock *sk = sock->sk;
277 struct tipc_port *tport;
278 struct sk_buff *buf;
279 int res;
280
281 /*
282 * Exit if socket isn't fully initialized (occurs when a failed accept()
283 * releases a pre-allocated child socket that was never used)
284 */
285
286 if (sk == NULL)
287 return 0;
288
289 tport = tipc_sk_port(sk);
290 lock_sock(sk);
291
292 /*
293 * Reject all unreceived messages, except on an active connection
294 * (which disconnects locally & sends a 'FIN+' to peer)
295 */
296
297 while (sock->state != SS_DISCONNECTING) {
298 buf = __skb_dequeue(&sk->sk_receive_queue);
299 if (buf == NULL)
300 break;
301 atomic_dec(&tipc_queue_size);
302 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
303 buf_discard(buf);
304 else {
305 if ((sock->state == SS_CONNECTING) ||
306 (sock->state == SS_CONNECTED)) {
307 sock->state = SS_DISCONNECTING;
308 tipc_disconnect(tport->ref);
309 }
310 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
311 }
312 }
313
314 /*
315 * Delete TIPC port; this ensures no more messages are queued
316 * (also disconnects an active connection & sends a 'FIN-' to peer)
317 */
318
319 res = tipc_deleteport(tport->ref);
320
321 /* Discard any remaining (connection-based) messages in receive queue */
322
323 discard_rx_queue(sk);
324
325 /* Reject any messages that accumulated in backlog queue */
326
327 sock->state = SS_DISCONNECTING;
328 release_sock(sk);
329
330 sock_put(sk);
331 sock->sk = NULL;
332
333 atomic_dec(&tipc_user_count);
334 return res;
335 }
336
337 /**
338 * bind - associate or disassociate TIPC name(s) with a socket
339 * @sock: socket structure
340 * @uaddr: socket address describing name(s) and desired operation
341 * @uaddr_len: size of socket address data structure
342 *
343 * Name and name sequence binding is indicated using a positive scope value;
344 * a negative scope value unbinds the specified name. Specifying no name
345 * (i.e. a socket address length of 0) unbinds all names from the socket.
346 *
347 * Returns 0 on success, errno otherwise
348 *
349 * NOTE: This routine doesn't need to take the socket lock since it doesn't
350 * access any non-constant socket information.
351 */
352
353 static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
354 {
355 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
356 u32 portref = tipc_sk_port(sock->sk)->ref;
357
358 if (unlikely(!uaddr_len))
359 return tipc_withdraw(portref, 0, NULL);
360
361 if (uaddr_len < sizeof(struct sockaddr_tipc))
362 return -EINVAL;
363 if (addr->family != AF_TIPC)
364 return -EAFNOSUPPORT;
365
366 if (addr->addrtype == TIPC_ADDR_NAME)
367 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
368 else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
369 return -EAFNOSUPPORT;
370
371 return (addr->scope > 0) ?
372 tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
373 tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
374 }
375
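/*
 * Usage sketch (user-space view of bind(); not kernel code).  Publishes an
 * arbitrary example name sequence {18888, 0, 99} with node scope; binding
 * again with a negated scope (e.g. -TIPC_NODE_SCOPE) withdraws it, and a
 * zero-length address withdraws everything.
 *
 *	struct sockaddr_tipc addr = {
 *		.family             = AF_TIPC,
 *		.addrtype           = TIPC_ADDR_NAMESEQ,
 *		.scope              = TIPC_NODE_SCOPE,
 *		.addr.nameseq.type  = 18888,
 *		.addr.nameseq.lower = 0,
 *		.addr.nameseq.upper = 99,
 *	};
 *
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 */
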
376 /**
377 * get_name - get port ID of socket or peer socket
378 * @sock: socket structure
379 * @uaddr: area for returned socket address
380 * @uaddr_len: area for returned length of socket address
381 * @peer: 0 to obtain socket name, 1 to obtain peer socket name
382 *
383 * Returns 0 on success, errno otherwise
384 *
385 * NOTE: This routine doesn't need to take the socket lock since it doesn't
386 * access any non-constant socket information.
387 */
388
389 static int get_name(struct socket *sock, struct sockaddr *uaddr,
390 int *uaddr_len, int peer)
391 {
392 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
393 u32 portref = tipc_sk_port(sock->sk)->ref;
394 u32 res;
395
396 if (peer) {
397 res = tipc_peer(portref, &addr->addr.id);
398 if (res)
399 return res;
400 } else {
401 tipc_ownidentity(portref, &addr->addr.id);
402 }
403
404 *uaddr_len = sizeof(*addr);
405 addr->addrtype = TIPC_ADDR_ID;
406 addr->family = AF_TIPC;
407 addr->scope = 0;
408 addr->addr.name.domain = 0;
409
410 return 0;
411 }
412
413 /**
414 * poll - read and possibly block on pollmask
415 * @file: file structure associated with the socket
416 * @sock: socket for which to calculate the poll bits
417 * @wait: poll table passed on to poll_wait()
418 *
419 * Returns pollmask value
420 *
421 * COMMENTARY:
422 * It appears that the usual socket locking mechanisms are not useful here
423 * since the pollmask info is potentially out-of-date the moment this routine
424 * exits. TCP and other protocols seem to rely on higher level poll routines
425 * to handle any preventable race conditions, so TIPC will do the same ...
426 *
427 * TIPC sets the returned events as follows:
428 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
429 * or if a connection-oriented socket does not have an active connection
430 * (i.e. a read operation will not block).
431 * b) POLLOUT is set except when a socket's connection has been terminated
432 * (i.e. a write operation will not block).
433 * c) POLLHUP is set when a socket's connection has been terminated.
434 *
435 * IMPORTANT: The fact that a read or write operation will not block does NOT
436 * imply that the operation will succeed!
437 */
438
439 static unsigned int poll(struct file *file, struct socket *sock,
440 poll_table *wait)
441 {
442 struct sock *sk = sock->sk;
443 u32 mask;
444
445 poll_wait(file, sk->sk_sleep, wait);
446
447 if (!skb_queue_empty(&sk->sk_receive_queue) ||
448 (sock->state == SS_UNCONNECTED) ||
449 (sock->state == SS_DISCONNECTING))
450 mask = (POLLRDNORM | POLLIN);
451 else
452 mask = 0;
453
454 if (sock->state == SS_DISCONNECTING)
455 mask |= POLLHUP;
456 else
457 mask |= POLLOUT;
458
459 return mask;
460 }
461
462 /**
463 * dest_name_check - verify user is permitted to send to specified port name
464 * @dest: destination address
465 * @m: descriptor for message to be sent
466 *
467 * Prevents restricted configuration commands from being issued by
468 * unauthorized users.
469 *
470 * Returns 0 if permission is granted, otherwise errno
471 */
472
473 static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
474 {
475 struct tipc_cfg_msg_hdr hdr;
476
477 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
478 return 0;
479 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
480 return 0;
481 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
482 return -EACCES;
483
484 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
485 return -EFAULT;
486 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
487 return -EACCES;
488
489 return 0;
490 }
491
492 /**
493 * send_msg - send message in connectionless manner
494 * @iocb: if NULL, indicates that socket lock is already held
495 * @sock: socket structure
496 * @m: message to send
497 * @total_len: length of message
498 *
499 * The message must have a destination specified explicitly.
500 * Used for SOCK_RDM and SOCK_DGRAM messages,
501 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
502 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
503 *
504 * Returns the number of bytes sent on success, or errno otherwise
505 */
506
507 static int send_msg(struct kiocb *iocb, struct socket *sock,
508 struct msghdr *m, size_t total_len)
509 {
510 struct sock *sk = sock->sk;
511 struct tipc_port *tport = tipc_sk_port(sk);
512 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
513 int needs_conn;
514 int res = -EINVAL;
515
516 if (unlikely(!dest))
517 return -EDESTADDRREQ;
518 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
519 (dest->family != AF_TIPC)))
520 return -EINVAL;
521
522 if (iocb)
523 lock_sock(sk);
524
525 needs_conn = (sock->state != SS_READY);
526 if (unlikely(needs_conn)) {
527 if (sock->state == SS_LISTENING) {
528 res = -EPIPE;
529 goto exit;
530 }
531 if (sock->state != SS_UNCONNECTED) {
532 res = -EISCONN;
533 goto exit;
534 }
535 if ((tport->published) ||
536 ((sock->type == SOCK_STREAM) && (total_len != 0))) {
537 res = -EOPNOTSUPP;
538 goto exit;
539 }
540 if (dest->addrtype == TIPC_ADDR_NAME) {
541 tport->conn_type = dest->addr.name.name.type;
542 tport->conn_instance = dest->addr.name.name.instance;
543 }
544
545 /* Abort any pending connection attempts (very unlikely) */
546
547 reject_rx_queue(sk);
548 }
549
550 do {
551 if (dest->addrtype == TIPC_ADDR_NAME) {
552 if ((res = dest_name_check(dest, m)))
553 break;
554 res = tipc_send2name(tport->ref,
555 &dest->addr.name.name,
556 dest->addr.name.domain,
557 m->msg_iovlen,
558 m->msg_iov);
559 }
560 else if (dest->addrtype == TIPC_ADDR_ID) {
561 res = tipc_send2port(tport->ref,
562 &dest->addr.id,
563 m->msg_iovlen,
564 m->msg_iov);
565 }
566 else if (dest->addrtype == TIPC_ADDR_MCAST) {
567 if (needs_conn) {
568 res = -EOPNOTSUPP;
569 break;
570 }
571 if ((res = dest_name_check(dest, m)))
572 break;
573 res = tipc_multicast(tport->ref,
574 &dest->addr.nameseq,
575 0,
576 m->msg_iovlen,
577 m->msg_iov);
578 }
579 if (likely(res != -ELINKCONG)) {
580 if (needs_conn && (res >= 0)) {
581 sock->state = SS_CONNECTING;
582 }
583 break;
584 }
585 if (m->msg_flags & MSG_DONTWAIT) {
586 res = -EWOULDBLOCK;
587 break;
588 }
589 release_sock(sk);
590 res = wait_event_interruptible(*sk->sk_sleep,
591 !tport->congested);
592 lock_sock(sk);
593 if (res)
594 break;
595 } while (1);
596
597 exit:
598 if (iocb)
599 release_sock(sk);
600 return res;
601 }
602
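/*
 * Usage sketch (user-space view of send_msg(); not kernel code).  Sends one
 * datagram to an arbitrary example service name {18888, 17} on a SOCK_RDM
 * socket; the lookup domain is left at 0 (unrestricted).
 *
 *	struct sockaddr_tipc dest = {
 *		.family                  = AF_TIPC,
 *		.addrtype                = TIPC_ADDR_NAME,
 *		.addr.name.name.type     = 18888,
 *		.addr.name.name.instance = 17,
 *		.addr.name.domain        = 0,
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dest, sizeof(dest));
 */
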
603 /**
604 * send_packet - send a connection-oriented message
605 * @iocb: if NULL, indicates that socket lock is already held
606 * @sock: socket structure
607 * @m: message to send
608 * @total_len: length of message
609 *
610 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
611 *
612 * Returns the number of bytes sent on success, or errno otherwise
613 */
614
615 static int send_packet(struct kiocb *iocb, struct socket *sock,
616 struct msghdr *m, size_t total_len)
617 {
618 struct sock *sk = sock->sk;
619 struct tipc_port *tport = tipc_sk_port(sk);
620 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
621 int res;
622
623 /* Handle implied connection establishment */
624
625 if (unlikely(dest))
626 return send_msg(iocb, sock, m, total_len);
627
628 if (iocb)
629 lock_sock(sk);
630
631 do {
632 if (unlikely(sock->state != SS_CONNECTED)) {
633 if (sock->state == SS_DISCONNECTING)
634 res = -EPIPE;
635 else
636 res = -ENOTCONN;
637 break;
638 }
639
640 res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov);
641 if (likely(res != -ELINKCONG)) {
642 break;
643 }
644 if (m->msg_flags & MSG_DONTWAIT) {
645 res = -EWOULDBLOCK;
646 break;
647 }
648 release_sock(sk);
649 res = wait_event_interruptible(*sk->sk_sleep,
650 (!tport->congested || !tport->connected));
651 lock_sock(sk);
652 if (res)
653 break;
654 } while (1);
655
656 if (iocb)
657 release_sock(sk);
658 return res;
659 }
660
661 /**
662 * send_stream - send stream-oriented data
663 * @iocb: (unused)
664 * @sock: socket structure
665 * @m: data to send
666 * @total_len: total length of data to be sent
667 *
668 * Used for SOCK_STREAM data.
669 *
670 * Returns the number of bytes sent on success (or partial success),
671 * or errno if no data sent
672 */
673
674 static int send_stream(struct kiocb *iocb, struct socket *sock,
675 struct msghdr *m, size_t total_len)
676 {
677 struct sock *sk = sock->sk;
678 struct tipc_port *tport = tipc_sk_port(sk);
679 struct msghdr my_msg;
680 struct iovec my_iov;
681 struct iovec *curr_iov;
682 int curr_iovlen;
683 char __user *curr_start;
684 u32 hdr_size;
685 int curr_left;
686 int bytes_to_send;
687 int bytes_sent;
688 int res;
689
690 lock_sock(sk);
691
692 /* Handle special cases where there is no connection */
693
694 if (unlikely(sock->state != SS_CONNECTED)) {
695 if (sock->state == SS_UNCONNECTED) {
696 res = send_packet(NULL, sock, m, total_len);
697 goto exit;
698 } else if (sock->state == SS_DISCONNECTING) {
699 res = -EPIPE;
700 goto exit;
701 } else {
702 res = -ENOTCONN;
703 goto exit;
704 }
705 }
706
707 if (unlikely(m->msg_name)) {
708 res = -EISCONN;
709 goto exit;
710 }
711
712 /*
713 * Send each iovec entry using one or more messages
714 *
715 * Note: This algorithm is good for the most likely case
716 * (i.e. one large iovec entry), but could be improved to pass sets
717 * of small iovec entries into send_packet().
718 */
719
720 curr_iov = m->msg_iov;
721 curr_iovlen = m->msg_iovlen;
722 my_msg.msg_iov = &my_iov;
723 my_msg.msg_iovlen = 1;
724 my_msg.msg_flags = m->msg_flags;
725 my_msg.msg_name = NULL;
726 bytes_sent = 0;
727
728 hdr_size = msg_hdr_sz(&tport->phdr);
729
730 while (curr_iovlen--) {
731 curr_start = curr_iov->iov_base;
732 curr_left = curr_iov->iov_len;
733
734 while (curr_left) {
735 bytes_to_send = tport->max_pkt - hdr_size;
736 if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
737 bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
738 if (curr_left < bytes_to_send)
739 bytes_to_send = curr_left;
740 my_iov.iov_base = curr_start;
741 my_iov.iov_len = bytes_to_send;
742 if ((res = send_packet(NULL, sock, &my_msg, 0)) < 0) {
743 if (bytes_sent)
744 res = bytes_sent;
745 goto exit;
746 }
747 curr_left -= bytes_to_send;
748 curr_start += bytes_to_send;
749 bytes_sent += bytes_to_send;
750 }
751
752 curr_iov++;
753 }
754 res = bytes_sent;
755 exit:
756 release_sock(sk);
757 return res;
758 }
759
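/*
 * Worked example of the fragmentation above (numbers are illustrative):
 * with max_pkt = 1500 and hdr_size = 24, each send_packet() call carries at
 * most 1476 bytes, so a single 4000-byte iovec entry is sent as chunks of
 * 1476 + 1476 + 1048 bytes.  If a later chunk fails after earlier chunks
 * were accepted, the number of bytes already sent is returned instead of
 * the error code.
 */
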
760 /**
761 * auto_connect - complete connection setup to a remote port
762 * @sock: socket structure
763 * @msg: peer's response message
764 *
765 * Returns 0 on success, errno otherwise
766 */
767
768 static int auto_connect(struct socket *sock, struct tipc_msg *msg)
769 {
770 struct tipc_port *tport = tipc_sk_port(sock->sk);
771 struct tipc_portid peer;
772
773 if (msg_errcode(msg)) {
774 sock->state = SS_DISCONNECTING;
775 return -ECONNREFUSED;
776 }
777
778 peer.ref = msg_origport(msg);
779 peer.node = msg_orignode(msg);
780 tipc_connect2port(tport->ref, &peer);
781 tipc_set_portimportance(tport->ref, msg_importance(msg));
782 sock->state = SS_CONNECTED;
783 return 0;
784 }
785
786 /**
787 * set_orig_addr - capture sender's address for received message
788 * @m: descriptor for message info
789 * @msg: received message header
790 *
791 * Note: Address is not captured if not requested by receiver.
792 */
793
794 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
795 {
796 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
797
798 if (addr) {
799 addr->family = AF_TIPC;
800 addr->addrtype = TIPC_ADDR_ID;
801 addr->addr.id.ref = msg_origport(msg);
802 addr->addr.id.node = msg_orignode(msg);
803 addr->addr.name.domain = 0; /* could leave uninitialized */
804 addr->scope = 0; /* could leave uninitialized */
805 m->msg_namelen = sizeof(struct sockaddr_tipc);
806 }
807 }
808
809 /**
810 * anc_data_recv - optionally capture ancillary data for received message
811 * @m: descriptor for message info
812 * @msg: received message header
813 * @tport: TIPC port associated with message
814 *
815 * Note: Ancillary data is not captured if not requested by receiver.
816 *
817 * Returns 0 if successful, otherwise errno
818 */
819
820 static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
821 struct tipc_port *tport)
822 {
823 u32 anc_data[3];
824 u32 err;
825 u32 dest_type;
826 int has_name;
827 int res;
828
829 if (likely(m->msg_controllen == 0))
830 return 0;
831
832 /* Optionally capture errored message object(s) */
833
834 err = msg ? msg_errcode(msg) : 0;
835 if (unlikely(err)) {
836 anc_data[0] = err;
837 anc_data[1] = msg_data_sz(msg);
838 if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
839 return res;
840 if (anc_data[1] &&
841 (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
842 msg_data(msg))))
843 return res;
844 }
845
846 /* Optionally capture message destination object */
847
848 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
849 switch (dest_type) {
850 case TIPC_NAMED_MSG:
851 has_name = 1;
852 anc_data[0] = msg_nametype(msg);
853 anc_data[1] = msg_namelower(msg);
854 anc_data[2] = msg_namelower(msg);
855 break;
856 case TIPC_MCAST_MSG:
857 has_name = 1;
858 anc_data[0] = msg_nametype(msg);
859 anc_data[1] = msg_namelower(msg);
860 anc_data[2] = msg_nameupper(msg);
861 break;
862 case TIPC_CONN_MSG:
863 has_name = (tport->conn_type != 0);
864 anc_data[0] = tport->conn_type;
865 anc_data[1] = tport->conn_instance;
866 anc_data[2] = tport->conn_instance;
867 break;
868 default:
869 has_name = 0;
870 }
871 if (has_name &&
872 (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
873 return res;
874
875 return 0;
876 }
877
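/*
 * Usage sketch (user-space view of anc_data_recv(); not kernel code): how a
 * receiver picks up the TIPC_DESTNAME object queued above.  "sd" is an open
 * TIPC socket; the 12-byte size matches the three u32 values put_cmsg()'d
 * by this routine.
 *
 *	char data[256];
 *	unsigned char cbuf[CMSG_SPACE(12)];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *
 *	recvmsg(sd, &m, 0);
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm))
 *		if (cm->cmsg_level == SOL_TIPC && cm->cmsg_type == TIPC_DESTNAME) {
 *			__u32 *name = (__u32 *)CMSG_DATA(cm);
 *			printf("type %u range %u-%u\n", name[0], name[1], name[2]);
 *		}
 */
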
878 /**
879 * recv_msg - receive packet-oriented message
880 * @iocb: (unused)
881 * @m: descriptor for message info
882 * @buf_len: total size of user buffer area
883 * @flags: receive flags
884 *
885 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
886 * If the complete message doesn't fit in user area, truncate it.
887 *
888 * Returns size of returned message data, errno otherwise
889 */
890
891 static int recv_msg(struct kiocb *iocb, struct socket *sock,
892 struct msghdr *m, size_t buf_len, int flags)
893 {
894 struct sock *sk = sock->sk;
895 struct tipc_port *tport = tipc_sk_port(sk);
896 struct sk_buff *buf;
897 struct tipc_msg *msg;
898 unsigned int sz;
899 u32 err;
900 int res;
901
902 /* Catch invalid receive requests */
903
904 if (m->msg_iovlen != 1)
905 return -EOPNOTSUPP; /* Don't do multiple iovec entries yet */
906
907 if (unlikely(!buf_len))
908 return -EINVAL;
909
910 lock_sock(sk);
911
912 if (unlikely(sock->state == SS_UNCONNECTED)) {
913 res = -ENOTCONN;
914 goto exit;
915 }
916
917 restart:
918
919 /* Look for a message in receive queue; wait if necessary */
920
921 while (skb_queue_empty(&sk->sk_receive_queue)) {
922 if (sock->state == SS_DISCONNECTING) {
923 res = -ENOTCONN;
924 goto exit;
925 }
926 if (flags & MSG_DONTWAIT) {
927 res = -EWOULDBLOCK;
928 goto exit;
929 }
930 release_sock(sk);
931 res = wait_event_interruptible(*sk->sk_sleep,
932 (!skb_queue_empty(&sk->sk_receive_queue) ||
933 (sock->state == SS_DISCONNECTING)));
934 lock_sock(sk);
935 if (res)
936 goto exit;
937 }
938
939 /* Look at first message in receive queue */
940
941 buf = skb_peek(&sk->sk_receive_queue);
942 msg = buf_msg(buf);
943 sz = msg_data_sz(msg);
944 err = msg_errcode(msg);
945
946 /* Complete connection setup for an implied connect */
947
948 if (unlikely(sock->state == SS_CONNECTING)) {
949 res = auto_connect(sock, msg);
950 if (res)
951 goto exit;
952 }
953
954 /* Discard an empty non-errored message & try again */
955
956 if ((!sz) && (!err)) {
957 advance_rx_queue(sk);
958 goto restart;
959 }
960
961 /* Capture sender's address (optional) */
962
963 set_orig_addr(m, msg);
964
965 /* Capture ancillary data (optional) */
966
967 res = anc_data_recv(m, msg, tport);
968 if (res)
969 goto exit;
970
971 /* Capture message data (if valid) & compute return value (always) */
972
973 if (!err) {
974 if (unlikely(buf_len < sz)) {
975 sz = buf_len;
976 m->msg_flags |= MSG_TRUNC;
977 }
978 if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
979 sz))) {
980 res = -EFAULT;
981 goto exit;
982 }
983 res = sz;
984 } else {
985 if ((sock->state == SS_READY) ||
986 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
987 res = 0;
988 else
989 res = -ECONNRESET;
990 }
991
992 /* Consume received message (optional) */
993
994 if (likely(!(flags & MSG_PEEK))) {
995 if ((sock->state != SS_READY) &&
996 (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
997 tipc_acknowledge(tport->ref, tport->conn_unacked);
998 advance_rx_queue(sk);
999 }
1000 exit:
1001 release_sock(sk);
1002 return res;
1003 }
1004
1005 /**
1006 * recv_stream - receive stream-oriented data
1007 * @iocb: (unused)
1008 * @m: descriptor for message info
1009 * @buf_len: total size of user buffer area
1010 * @flags: receive flags
1011 *
1012 * Used for SOCK_STREAM messages only. If not enough data is available,
1013 * this routine will optionally wait for more; it never truncates data.
1014 *
1015 * Returns size of returned message data, errno otherwise
1016 */
1017
1018 static int recv_stream(struct kiocb *iocb, struct socket *sock,
1019 struct msghdr *m, size_t buf_len, int flags)
1020 {
1021 struct sock *sk = sock->sk;
1022 struct tipc_port *tport = tipc_sk_port(sk);
1023 struct sk_buff *buf;
1024 struct tipc_msg *msg;
1025 unsigned int sz;
1026 int sz_to_copy;
1027 int sz_copied = 0;
1028 int needed;
1029 char __user *crs = m->msg_iov->iov_base;
1030 unsigned char *buf_crs;
1031 u32 err;
1032 int res = 0;
1033
1034 /* Catch invalid receive attempts */
1035
1036 if (m->msg_iovlen != 1)
1037 return -EOPNOTSUPP; /* Don't do multiple iovec entries yet */
1038
1039 if (unlikely(!buf_len))
1040 return -EINVAL;
1041
1042 lock_sock(sk);
1043
1044 if (unlikely((sock->state == SS_UNCONNECTED) ||
1045 (sock->state == SS_CONNECTING))) {
1046 res = -ENOTCONN;
1047 goto exit;
1048 }
1049
1050 restart:
1051
1052 /* Look for a message in receive queue; wait if necessary */
1053
1054 while (skb_queue_empty(&sk->sk_receive_queue)) {
1055 if (sock->state == SS_DISCONNECTING) {
1056 res = -ENOTCONN;
1057 goto exit;
1058 }
1059 if (flags & MSG_DONTWAIT) {
1060 res = -EWOULDBLOCK;
1061 goto exit;
1062 }
1063 release_sock(sk);
1064 res = wait_event_interruptible(*sk->sk_sleep,
1065 (!skb_queue_empty(&sk->sk_receive_queue) ||
1066 (sock->state == SS_DISCONNECTING)));
1067 lock_sock(sk);
1068 if (res)
1069 goto exit;
1070 }
1071
1072 /* Look at first message in receive queue */
1073
1074 buf = skb_peek(&sk->sk_receive_queue);
1075 msg = buf_msg(buf);
1076 sz = msg_data_sz(msg);
1077 err = msg_errcode(msg);
1078
1079 /* Discard an empty non-errored message & try again */
1080
1081 if ((!sz) && (!err)) {
1082 advance_rx_queue(sk);
1083 goto restart;
1084 }
1085
1086 /* Optionally capture sender's address & ancillary data of first msg */
1087
1088 if (sz_copied == 0) {
1089 set_orig_addr(m, msg);
1090 res = anc_data_recv(m, msg, tport);
1091 if (res)
1092 goto exit;
1093 }
1094
1095 /* Capture message data (if valid) & compute return value (always) */
1096
1097 if (!err) {
1098 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
1099 sz = (unsigned char *)msg + msg_size(msg) - buf_crs;
1100
1101 needed = (buf_len - sz_copied);
1102 sz_to_copy = (sz <= needed) ? sz : needed;
1103 if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
1104 res = -EFAULT;
1105 goto exit;
1106 }
1107 sz_copied += sz_to_copy;
1108
1109 if (sz_to_copy < sz) {
1110 if (!(flags & MSG_PEEK))
1111 TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
1112 goto exit;
1113 }
1114
1115 crs += sz_to_copy;
1116 } else {
1117 if (sz_copied != 0)
1118 goto exit; /* can't add error msg to valid data */
1119
1120 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1121 res = 0;
1122 else
1123 res = -ECONNRESET;
1124 }
1125
1126 /* Consume received message (optional) */
1127
1128 if (likely(!(flags & MSG_PEEK))) {
1129 if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1130 tipc_acknowledge(tport->ref, tport->conn_unacked);
1131 advance_rx_queue(sk);
1132 }
1133
1134 /* Loop around if more data is required */
1135
1136 if ((sz_copied < buf_len) /* didn't get all requested data */
1137 && (!skb_queue_empty(&sock->sk->sk_receive_queue) ||
1138 (flags & MSG_WAITALL))
1139 /* ... and more is ready or required */
1140 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
1141 && (!err) /* ... and haven't reached a FIN */
1142 )
1143 goto restart;
1144
1145 exit:
1146 release_sock(sk);
1147 return sz_copied ? sz_copied : res;
1148 }
1149
1150 /**
1151 * rx_queue_full - determine if receive queue can accept another message
1152 * @msg: message to be added to queue
1153 * @queue_size: current size of queue
1154 * @base: nominal maximum size of queue
1155 *
1156 * Returns 1 if queue is unable to accept message, 0 otherwise
1157 */
1158
1159 static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1160 {
1161 u32 threshold;
1162 u32 imp = msg_importance(msg);
1163
1164 if (imp == TIPC_LOW_IMPORTANCE)
1165 threshold = base;
1166 else if (imp == TIPC_MEDIUM_IMPORTANCE)
1167 threshold = base * 2;
1168 else if (imp == TIPC_HIGH_IMPORTANCE)
1169 threshold = base * 100;
1170 else
1171 return 0;
1172
1173 if (msg_connected(msg))
1174 threshold *= 4;
1175
1176 return (queue_size >= threshold);
1177 }
1178
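/*
 * Worked example of the thresholds above with base = OVERLOAD_LIMIT_BASE
 * (5000), as used for the global queue check in filter_rcv():
 *
 *	importance			unconnected	connected (x4)
 *	TIPC_LOW_IMPORTANCE		  5000		  20000
 *	TIPC_MEDIUM_IMPORTANCE		 10000		  40000
 *	TIPC_HIGH_IMPORTANCE		500000		2000000
 *	TIPC_CRITICAL_IMPORTANCE	never full (routine returns 0)
 *
 * The per-socket check in filter_rcv() repeats this with base = 2500.
 */
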
1179 /**
1180 * filter_rcv - validate incoming message
1181 * @sk: socket
1182 * @buf: message
1183 *
1184 * Enqueues message on receive queue if acceptable; optionally handles
1185 * disconnect indication for a connected socket.
1186 *
1187 * Called with socket lock already taken; port lock may also be taken.
1188 *
1189 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1190 */
1191
1192 static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1193 {
1194 struct socket *sock = sk->sk_socket;
1195 struct tipc_msg *msg = buf_msg(buf);
1196 u32 recv_q_len;
1197
1198 /* Reject message if it is wrong sort of message for socket */
1199
1200 /*
1201 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
1202 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
1203 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
1204 */
1205
1206 if (sock->state == SS_READY) {
1207 if (msg_connected(msg)) {
1208 msg_dbg(msg, "dispatch filter 1\n");
1209 return TIPC_ERR_NO_PORT;
1210 }
1211 } else {
1212 if (msg_mcast(msg)) {
1213 msg_dbg(msg, "dispatch filter 2\n");
1214 return TIPC_ERR_NO_PORT;
1215 }
1216 if (sock->state == SS_CONNECTED) {
1217 if (!msg_connected(msg)) {
1218 msg_dbg(msg, "dispatch filter 3\n");
1219 return TIPC_ERR_NO_PORT;
1220 }
1221 }
1222 else if (sock->state == SS_CONNECTING) {
1223 if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
1224 msg_dbg(msg, "dispatch filter 4\n");
1225 return TIPC_ERR_NO_PORT;
1226 }
1227 }
1228 else if (sock->state == SS_LISTENING) {
1229 if (msg_connected(msg) || msg_errcode(msg)) {
1230 msg_dbg(msg, "dispatch filter 5\n");
1231 return TIPC_ERR_NO_PORT;
1232 }
1233 }
1234 else if (sock->state == SS_DISCONNECTING) {
1235 msg_dbg(msg, "dispatch filter 6\n");
1236 return TIPC_ERR_NO_PORT;
1237 }
1238 else /* (sock->state == SS_UNCONNECTED) */ {
1239 if (msg_connected(msg) || msg_errcode(msg)) {
1240 msg_dbg(msg, "dispatch filter 7\n");
1241 return TIPC_ERR_NO_PORT;
1242 }
1243 }
1244 }
1245
1246 /* Reject message if there isn't room to queue it */
1247
1248 recv_q_len = (u32)atomic_read(&tipc_queue_size);
1249 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
1250 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
1251 return TIPC_ERR_OVERLOAD;
1252 }
1253 recv_q_len = skb_queue_len(&sk->sk_receive_queue);
1254 if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
1255 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
1256 return TIPC_ERR_OVERLOAD;
1257 }
1258
1259 /* Enqueue message (finally!) */
1260
1261 msg_dbg(msg, "<DISP<: ");
1262 TIPC_SKB_CB(buf)->handle = msg_data(msg);
1263 atomic_inc(&tipc_queue_size);
1264 __skb_queue_tail(&sk->sk_receive_queue, buf);
1265
1266 /* Initiate connection termination for an incoming 'FIN' */
1267
1268 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1269 sock->state = SS_DISCONNECTING;
1270 tipc_disconnect_port(tipc_sk_port(sk));
1271 }
1272
1273 if (waitqueue_active(sk->sk_sleep))
1274 wake_up_interruptible(sk->sk_sleep);
1275 return TIPC_OK;
1276 }
1277
1278 /**
1279 * backlog_rcv - handle incoming message from backlog queue
1280 * @sk: socket
1281 * @buf: message
1282 *
1283 * Caller must hold socket lock, but not port lock.
1284 *
1285 * Returns 0
1286 */
1287
1288 static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1289 {
1290 u32 res;
1291
1292 res = filter_rcv(sk, buf);
1293 if (res)
1294 tipc_reject_msg(buf, res);
1295 return 0;
1296 }
1297
1298 /**
1299 * dispatch - handle incoming message
1300 * @tport: TIPC port that received message
1301 * @buf: message
1302 *
1303 * Called with port lock already taken.
1304 *
1305 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1306 */
1307
1308 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1309 {
1310 struct sock *sk = (struct sock *)tport->usr_handle;
1311 u32 res;
1312
1313 /*
1314 * Process message if socket is unlocked; otherwise add to backlog queue
1315 *
1316 * This code is based on sk_receive_skb(), but must be distinct from it
1317 * since a TIPC-specific filter/reject mechanism is utilized
1318 */
1319
1320 bh_lock_sock(sk);
1321 if (!sock_owned_by_user(sk)) {
1322 res = filter_rcv(sk, buf);
1323 } else {
1324 sk_add_backlog(sk, buf);
1325 res = TIPC_OK;
1326 }
1327 bh_unlock_sock(sk);
1328
1329 return res;
1330 }
1331
1332 /**
1333 * wakeupdispatch - wake up port after congestion
1334 * @tport: port to wakeup
1335 *
1336 * Called with port lock already taken.
1337 */
1338
1339 static void wakeupdispatch(struct tipc_port *tport)
1340 {
1341 struct sock *sk = (struct sock *)tport->usr_handle;
1342
1343 if (waitqueue_active(sk->sk_sleep))
1344 wake_up_interruptible(sk->sk_sleep);
1345 }
1346
1347 /**
1348 * connect - establish a connection to another TIPC port
1349 * @sock: socket structure
1350 * @dest: socket address for destination port
1351 * @destlen: size of socket address data structure
1352 * @flags: file-related flags associated with socket
1353 *
1354 * Returns 0 on success, errno otherwise
1355 */
1356
1357 static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1358 int flags)
1359 {
1360 struct sock *sk = sock->sk;
1361 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1362 struct msghdr m = {NULL,};
1363 struct sk_buff *buf;
1364 struct tipc_msg *msg;
1365 int res;
1366
1367 lock_sock(sk);
1368
1369 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1370
1371 if (sock->state == SS_READY) {
1372 res = -EOPNOTSUPP;
1373 goto exit;
1374 }
1375
1376 /* For now, TIPC does not support the non-blocking form of connect() */
1377
1378 if (flags & O_NONBLOCK) {
1379 res = -EWOULDBLOCK;
1380 goto exit;
1381 }
1382
1383 /* Issue Posix-compliant error code if socket is in the wrong state */
1384
1385 if (sock->state == SS_LISTENING) {
1386 res = -EOPNOTSUPP;
1387 goto exit;
1388 }
1389 if (sock->state == SS_CONNECTING) {
1390 res = -EALREADY;
1391 goto exit;
1392 }
1393 if (sock->state != SS_UNCONNECTED) {
1394 res = -EISCONN;
1395 goto exit;
1396 }
1397
1398 /*
1399 * Reject connection attempt using multicast address
1400 *
1401 * Note: send_msg() validates the rest of the address fields,
1402 * so there's no need to do it here
1403 */
1404
1405 if (dst->addrtype == TIPC_ADDR_MCAST) {
1406 res = -EINVAL;
1407 goto exit;
1408 }
1409
1410 /* Reject any messages already in receive queue (very unlikely) */
1411
1412 reject_rx_queue(sk);
1413
1414 /* Send a 'SYN-' to destination */
1415
1416 m.msg_name = dest;
1417 m.msg_namelen = destlen;
1418 res = send_msg(NULL, sock, &m, 0);
1419 if (res < 0) {
1420 goto exit;
1421 }
1422
1423 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1424
1425 release_sock(sk);
1426 res = wait_event_interruptible_timeout(*sk->sk_sleep,
1427 (!skb_queue_empty(&sk->sk_receive_queue) ||
1428 (sock->state != SS_CONNECTING)),
1429 sk->sk_rcvtimeo);
1430 lock_sock(sk);
1431
1432 if (res > 0) {
1433 buf = skb_peek(&sk->sk_receive_queue);
1434 if (buf != NULL) {
1435 msg = buf_msg(buf);
1436 res = auto_connect(sock, msg);
1437 if (!res) {
1438 if (!msg_data_sz(msg))
1439 advance_rx_queue(sk);
1440 }
1441 } else {
1442 if (sock->state == SS_CONNECTED) {
1443 res = -EISCONN;
1444 } else {
1445 res = -ECONNREFUSED;
1446 }
1447 }
1448 } else {
1449 if (res == 0)
1450 res = -ETIMEDOUT;
1451 else
1452 ; /* leave "res" unchanged */
1453 sock->state = SS_DISCONNECTING;
1454 }
1455
1456 exit:
1457 release_sock(sk);
1458 return res;
1459 }
1460
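/*
 * Usage sketch (user-space view of connect(); not kernel code).  Connects a
 * SOCK_SEQPACKET or SOCK_STREAM socket to an arbitrary example service name
 * {18888, 17}.  The handshake times out after CONN_TIMEOUT_DEFAULT (8s)
 * unless changed via the TIPC_CONN_TIMEOUT socket option.
 *
 *	struct sockaddr_tipc peer = {
 *		.family                  = AF_TIPC,
 *		.addrtype                = TIPC_ADDR_NAME,
 *		.addr.name.name.type     = 18888,
 *		.addr.name.name.instance = 17,
 *	};
 *
 *	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *		perror("connect");
 */
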
1461 /**
1462 * listen - allow socket to listen for incoming connections
1463 * @sock: socket structure
1464 * @len: (unused)
1465 *
1466 * Returns 0 on success, errno otherwise
1467 */
1468
1469 static int listen(struct socket *sock, int len)
1470 {
1471 struct sock *sk = sock->sk;
1472 int res;
1473
1474 lock_sock(sk);
1475
1476 if (sock->state == SS_READY)
1477 res = -EOPNOTSUPP;
1478 else if (sock->state != SS_UNCONNECTED)
1479 res = -EINVAL;
1480 else {
1481 sock->state = SS_LISTENING;
1482 res = 0;
1483 }
1484
1485 release_sock(sk);
1486 return res;
1487 }
1488
1489 /**
1490 * accept - wait for connection request
1491 * @sock: listening socket
1492 * @newsock: new socket that is to be connected
1493 * @flags: file-related flags associated with socket
1494 *
1495 * Returns 0 on success, errno otherwise
1496 */
1497
1498 static int accept(struct socket *sock, struct socket *new_sock, int flags)
1499 {
1500 struct sock *sk = sock->sk;
1501 struct sk_buff *buf;
1502 int res;
1503
1504 lock_sock(sk);
1505
1506 if (sock->state == SS_READY) {
1507 res = -EOPNOTSUPP;
1508 goto exit;
1509 }
1510 if (sock->state != SS_LISTENING) {
1511 res = -EINVAL;
1512 goto exit;
1513 }
1514
1515 while (skb_queue_empty(&sk->sk_receive_queue)) {
1516 if (flags & O_NONBLOCK) {
1517 res = -EWOULDBLOCK;
1518 goto exit;
1519 }
1520 release_sock(sk);
1521 res = wait_event_interruptible(*sk->sk_sleep,
1522 (!skb_queue_empty(&sk->sk_receive_queue)));
1523 lock_sock(sk);
1524 if (res)
1525 goto exit;
1526 }
1527
1528 buf = skb_peek(&sk->sk_receive_queue);
1529
1530 res = tipc_create(sock_net(sock->sk), new_sock, 0);
1531 if (!res) {
1532 struct sock *new_sk = new_sock->sk;
1533 struct tipc_port *new_tport = tipc_sk_port(new_sk);
1534 u32 new_ref = new_tport->ref;
1535 struct tipc_portid id;
1536 struct tipc_msg *msg = buf_msg(buf);
1537
1538 lock_sock(new_sk);
1539
1540 /*
1541 * Reject any stray messages received by new socket
1542 * before the socket lock was taken (very, very unlikely)
1543 */
1544
1545 reject_rx_queue(new_sk);
1546
1547 /* Connect new socket to its peer */
1548
1549 id.ref = msg_origport(msg);
1550 id.node = msg_orignode(msg);
1551 tipc_connect2port(new_ref, &id);
1552 new_sock->state = SS_CONNECTED;
1553
1554 tipc_set_portimportance(new_ref, msg_importance(msg));
1555 if (msg_named(msg)) {
1556 new_tport->conn_type = msg_nametype(msg);
1557 new_tport->conn_instance = msg_nameinst(msg);
1558 }
1559
1560 /*
1561 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1562 * Respond to 'SYN+' by queuing it on new socket.
1563 */
1564
1565 msg_dbg(msg, "<ACC<: ");
1566 if (!msg_data_sz(msg)) {
1567 struct msghdr m = {NULL,};
1568
1569 advance_rx_queue(sk);
1570 send_packet(NULL, new_sock, &m, 0);
1571 } else {
1572 __skb_dequeue(&sk->sk_receive_queue);
1573 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1574 }
1575 release_sock(new_sk);
1576 }
1577 exit:
1578 release_sock(sk);
1579 return res;
1580 }
1581
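/*
 * Usage sketch (user-space view of listen()/accept(); not kernel code).  The
 * bound name is an arbitrary example; accept() hands back a new socket that
 * is already connected to the peer that sent the 'SYN'.  The listen backlog
 * argument is ignored by TIPC.
 *
 *	struct sockaddr_tipc name = {
 *		.family             = AF_TIPC,
 *		.addrtype           = TIPC_ADDR_NAMESEQ,
 *		.scope              = TIPC_NODE_SCOPE,
 *		.addr.nameseq.type  = 18888,
 *		.addr.nameseq.lower = 17,
 *		.addr.nameseq.upper = 17,
 *	};
 *	int peer_sd;
 *
 *	bind(listener_sd, (struct sockaddr *)&name, sizeof(name));
 *	listen(listener_sd, 0);
 *	peer_sd = accept(listener_sd, NULL, NULL);
 */
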
1582 /**
1583 * shutdown - shutdown socket connection
1584 * @sock: socket structure
1585 * @how: direction to close (must be SHUT_RDWR)
1586 *
1587 * Terminates connection (if necessary), then purges socket's receive queue.
1588 *
1589 * Returns 0 on success, errno otherwise
1590 */
1591
1592 static int shutdown(struct socket *sock, int how)
1593 {
1594 struct sock *sk = sock->sk;
1595 struct tipc_port *tport = tipc_sk_port(sk);
1596 struct sk_buff *buf;
1597 int res;
1598
1599 if (how != SHUT_RDWR)
1600 return -EINVAL;
1601
1602 lock_sock(sk);
1603
1604 switch (sock->state) {
1605 case SS_CONNECTING:
1606 case SS_CONNECTED:
1607
1608 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1609 restart:
1610 buf = __skb_dequeue(&sk->sk_receive_queue);
1611 if (buf) {
1612 atomic_dec(&tipc_queue_size);
1613 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
1614 buf_discard(buf);
1615 goto restart;
1616 }
1617 tipc_disconnect(tport->ref);
1618 tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
1619 } else {
1620 tipc_shutdown(tport->ref);
1621 }
1622
1623 sock->state = SS_DISCONNECTING;
1624
1625 /* fall through */
1626
1627 case SS_DISCONNECTING:
1628
1629 /* Discard any unreceived messages; wake up sleeping tasks */
1630
1631 discard_rx_queue(sk);
1632 if (waitqueue_active(sk->sk_sleep))
1633 wake_up_interruptible(sk->sk_sleep);
1634 res = 0;
1635 break;
1636
1637 default:
1638 res = -ENOTCONN;
1639 }
1640
1641 release_sock(sk);
1642 return res;
1643 }
1644
1645 /**
1646 * setsockopt - set socket option
1647 * @sock: socket structure
1648 * @lvl: option level
1649 * @opt: option identifier
1650 * @ov: pointer to new option value
1651 * @ol: length of option value
1652 *
1653 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1654 * (to ease compatibility).
1655 *
1656 * Returns 0 on success, errno otherwise
1657 */
1658
1659 static int setsockopt(struct socket *sock,
1660 int lvl, int opt, char __user *ov, int ol)
1661 {
1662 struct sock *sk = sock->sk;
1663 struct tipc_port *tport = tipc_sk_port(sk);
1664 u32 value;
1665 int res;
1666
1667 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1668 return 0;
1669 if (lvl != SOL_TIPC)
1670 return -ENOPROTOOPT;
1671 if (ol < sizeof(value))
1672 return -EINVAL;
1673 if ((res = get_user(value, (u32 __user *)ov)))
1674 return res;
1675
1676 lock_sock(sk);
1677
1678 switch (opt) {
1679 case TIPC_IMPORTANCE:
1680 res = tipc_set_portimportance(tport->ref, value);
1681 break;
1682 case TIPC_SRC_DROPPABLE:
1683 if (sock->type != SOCK_STREAM)
1684 res = tipc_set_portunreliable(tport->ref, value);
1685 else
1686 res = -ENOPROTOOPT;
1687 break;
1688 case TIPC_DEST_DROPPABLE:
1689 res = tipc_set_portunreturnable(tport->ref, value);
1690 break;
1691 case TIPC_CONN_TIMEOUT:
1692 sk->sk_rcvtimeo = msecs_to_jiffies(value);
1693 /* no need to set "res", since already 0 at this point */
1694 break;
1695 default:
1696 res = -EINVAL;
1697 }
1698
1699 release_sock(sk);
1700
1701 return res;
1702 }
1703
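/*
 * Usage sketch (user-space view of setsockopt(); not kernel code): raise the
 * message importance and allow 30 seconds for connection setup (the timeout
 * value is given in milliseconds, as converted above).
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	__u32 tmo = 30000;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */
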
1704 /**
1705 * getsockopt - get socket option
1706 * @sock: socket structure
1707 * @lvl: option level
1708 * @opt: option identifier
1709 * @ov: receptacle for option value
1710 * @ol: receptacle for length of option value
1711 *
1712 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
1713 * (to ease compatibility).
1714 *
1715 * Returns 0 on success, errno otherwise
1716 */
1717
1718 static int getsockopt(struct socket *sock,
1719 int lvl, int opt, char __user *ov, int __user *ol)
1720 {
1721 struct sock *sk = sock->sk;
1722 struct tipc_port *tport = tipc_sk_port(sk);
1723 int len;
1724 u32 value;
1725 int res;
1726
1727 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1728 return put_user(0, ol);
1729 if (lvl != SOL_TIPC)
1730 return -ENOPROTOOPT;
1731 if ((res = get_user(len, ol)))
1732 return res;
1733
1734 lock_sock(sk);
1735
1736 switch (opt) {
1737 case TIPC_IMPORTANCE:
1738 res = tipc_portimportance(tport->ref, &value);
1739 break;
1740 case TIPC_SRC_DROPPABLE:
1741 res = tipc_portunreliable(tport->ref, &value);
1742 break;
1743 case TIPC_DEST_DROPPABLE:
1744 res = tipc_portunreturnable(tport->ref, &value);
1745 break;
1746 case TIPC_CONN_TIMEOUT:
1747 value = jiffies_to_msecs(sk->sk_rcvtimeo);
1748 /* no need to set "res", since already 0 at this point */
1749 break;
1750 default:
1751 res = -EINVAL;
1752 }
1753
1754 release_sock(sk);
1755
1756 if (res) {
1757 /* "get" failed */
1758 }
1759 else if (len < sizeof(value)) {
1760 res = -EINVAL;
1761 }
1762 else if (copy_to_user(ov, &value, sizeof(value))) {
1763 res = -EFAULT;
1764 }
1765 else {
1766 res = put_user(sizeof(value), ol);
1767 }
1768
1769 return res;
1770 }
1771
1772 /**
1773 * Protocol switches for the various types of TIPC sockets
1774 */
1775
1776 static const struct proto_ops msg_ops = {
1777 .owner = THIS_MODULE,
1778 .family = AF_TIPC,
1779 .release = release,
1780 .bind = bind,
1781 .connect = connect,
1782 .socketpair = sock_no_socketpair,
1783 .accept = accept,
1784 .getname = get_name,
1785 .poll = poll,
1786 .ioctl = sock_no_ioctl,
1787 .listen = listen,
1788 .shutdown = shutdown,
1789 .setsockopt = setsockopt,
1790 .getsockopt = getsockopt,
1791 .sendmsg = send_msg,
1792 .recvmsg = recv_msg,
1793 .mmap = sock_no_mmap,
1794 .sendpage = sock_no_sendpage
1795 };
1796
1797 static const struct proto_ops packet_ops = {
1798 .owner = THIS_MODULE,
1799 .family = AF_TIPC,
1800 .release = release,
1801 .bind = bind,
1802 .connect = connect,
1803 .socketpair = sock_no_socketpair,
1804 .accept = accept,
1805 .getname = get_name,
1806 .poll = poll,
1807 .ioctl = sock_no_ioctl,
1808 .listen = listen,
1809 .shutdown = shutdown,
1810 .setsockopt = setsockopt,
1811 .getsockopt = getsockopt,
1812 .sendmsg = send_packet,
1813 .recvmsg = recv_msg,
1814 .mmap = sock_no_mmap,
1815 .sendpage = sock_no_sendpage
1816 };
1817
1818 static const struct proto_ops stream_ops = {
1819 .owner = THIS_MODULE,
1820 .family = AF_TIPC,
1821 .release = release,
1822 .bind = bind,
1823 .connect = connect,
1824 .socketpair = sock_no_socketpair,
1825 .accept = accept,
1826 .getname = get_name,
1827 .poll = poll,
1828 .ioctl = sock_no_ioctl,
1829 .listen = listen,
1830 .shutdown = shutdown,
1831 .setsockopt = setsockopt,
1832 .getsockopt = getsockopt,
1833 .sendmsg = send_stream,
1834 .recvmsg = recv_stream,
1835 .mmap = sock_no_mmap,
1836 .sendpage = sock_no_sendpage
1837 };
1838
1839 static const struct net_proto_family tipc_family_ops = {
1840 .owner = THIS_MODULE,
1841 .family = AF_TIPC,
1842 .create = tipc_create
1843 };
1844
1845 static struct proto tipc_proto = {
1846 .name = "TIPC",
1847 .owner = THIS_MODULE,
1848 .obj_size = sizeof(struct tipc_sock)
1849 };
1850
1851 /**
1852 * tipc_socket_init - initialize TIPC socket interface
1853 *
1854 * Returns 0 on success, errno otherwise
1855 */
1856 int tipc_socket_init(void)
1857 {
1858 int res;
1859
1860 res = proto_register(&tipc_proto, 1);
1861 if (res) {
1862 err("Failed to register TIPC protocol type\n");
1863 goto out;
1864 }
1865
1866 res = sock_register(&tipc_family_ops);
1867 if (res) {
1868 err("Failed to register TIPC socket type\n");
1869 proto_unregister(&tipc_proto);
1870 goto out;
1871 }
1872
1873 sockets_enabled = 1;
1874 out:
1875 return res;
1876 }
1877
1878 /**
1879 * tipc_socket_stop - stop TIPC socket interface
1880 */
1881
1882 void tipc_socket_stop(void)
1883 {
1884 if (!sockets_enabled)
1885 return;
1886
1887 sockets_enabled = 0;
1888 sock_unregister(tipc_family_ops.family);
1889 proto_unregister(&tipc_proto);
1890 }
1891