/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "vmci_transport_notify.h"

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
static void vmci_transport_peer_attach_cb(u32 sub_id,
					  const struct vmci_event_data *ed,
					  void *client_data);
static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *ed,
					  void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
					struct sock *listener,
					struct sock *pending,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_invalid(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt);
static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
static u16 vmci_transport_new_proto_supported_versions(void);
static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
						  bool old_pkt_proto);

struct vmci_transport_recv_pkt_info {
	struct work_struct work;
	struct sock *sk;
	struct vmci_transport_packet pkt;
};

static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
							    VMCI_INVALID_ID };
static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;

static int PROTOCOL_OVERRIDE = -1;

#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN	128
#define VMCI_TRANSPORT_DEFAULT_QP_SIZE		262144
#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX	262144

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
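
/* The defaults above size each queue pair at 256 KiB per direction. The
 * set_buffer_size/set_min_buffer_size/set_max_buffer_size callbacks further
 * down adjust these limits on a per-socket basis.
 */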

/* Helper function to convert from a VMCI error code to a VSock error code. */

static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
{
	int err;

	switch (vmci_error) {
	case VMCI_ERROR_NO_MEM:
		err = ENOMEM;
		break;
	case VMCI_ERROR_DUPLICATE_ENTRY:
	case VMCI_ERROR_ALREADY_EXISTS:
		err = EADDRINUSE;
		break;
	case VMCI_ERROR_NO_ACCESS:
		err = EPERM;
		break;
	case VMCI_ERROR_NO_RESOURCES:
		err = ENOBUFS;
		break;
	case VMCI_ERROR_INVALID_RESOURCE:
		err = EHOSTUNREACH;
		break;
	case VMCI_ERROR_INVALID_ARGS:
	default:
		err = EINVAL;
	}

	return err > 0 ? -err : err;
}
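
/* VMCI error codes are VMCI_ERROR_* values; the helper above maps them onto
 * negative errno values so callers can hand them straight back to the socket
 * layer.
 */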

static void
vmci_transport_packet_init(struct vmci_transport_packet *pkt,
			   struct sockaddr_vm *src,
			   struct sockaddr_vm *dst,
			   enum vmci_transport_packet_type type,
			   u64 size,
			   u64 mode,
			   struct vmci_transport_waiting_info *wait,
			   u16 proto,
			   struct vmci_handle handle)
{
	/* We register the stream control handler as an any cid handle so we
	 * must always send from a source address of VMADDR_CID_ANY
	 */
	pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
				       VMCI_TRANSPORT_PACKET_RID);
	pkt->dg.dst = vmci_make_handle(dst->svm_cid,
				       VMCI_TRANSPORT_PACKET_RID);
	pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
	pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
	pkt->type = type;
	pkt->src_port = src->svm_port;
	pkt->dst_port = dst->svm_port;
	memset(&pkt->proto, 0, sizeof(pkt->proto));
	memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
		pkt->u.size = size;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		pkt->u.handle = handle;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
	case VMCI_TRANSPORT_PACKET_TYPE_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		pkt->u.mode = mode;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
		memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		pkt->u.size = size;
		pkt->proto = proto;
		break;
	}
}
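
/* Every control message is a fixed-size vmci_transport_packet carried in a
 * VMCI datagram.  Which union member is meaningful (size, mode, handle or
 * waiting info) depends on the packet type chosen above.
 */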

static void
vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
				    struct sockaddr_vm *local,
				    struct sockaddr_vm *remote)
{
	vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
	vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
}

static int
__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
				  struct sockaddr_vm *src,
				  struct sockaddr_vm *dst,
				  enum vmci_transport_packet_type type,
				  u64 size,
				  u64 mode,
				  struct vmci_transport_waiting_info *wait,
				  u16 proto,
				  struct vmci_handle handle,
				  bool convert_error)
{
	int err;

	vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
				   proto, handle);
	err = vmci_datagram_send(&pkt->dg);
	if (convert_error && (err < 0))
		return vmci_transport_error_to_vsock_error(err);

	return err;
}

static int
vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet reply;
	struct sockaddr_vm src, dst;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
		return 0;
	} else {
		vmci_transport_packet_get_addresses(pkt, &src, &dst);
		return __vmci_transport_send_control_pkt(&reply, &src, &dst,
							 type, size, mode, wait,
							 VSOCK_PROTO_INVALID,
							 handle, true);
	}
}

static int
vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
				   struct sockaddr_vm *dst,
				   enum vmci_transport_packet_type type,
				   u64 size,
				   u64 mode,
				   struct vmci_transport_waiting_info *wait,
				   struct vmci_handle handle)
{
	/* Note that it is safe to use a single packet across all CPUs since
	 * two tasklets of the same type are guaranteed to not ever run
	 * simultaneously.  If that ever changes, or VMCI stops using tasklets,
	 * we can use per-cpu packets.
	 */
	static struct vmci_transport_packet pkt;

	return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
						 size, mode, wait,
						 VSOCK_PROTO_INVALID, handle,
						 true);
}
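
/* The _bh variant above uses a single static packet and sends it directly, so
 * it can be called from the datagram tasklet; the variant below runs in
 * process context and allocates its packet with GFP_KERNEL instead.
 */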

static int
vmci_transport_send_control_pkt(struct sock *sk,
				enum vmci_transport_packet_type type,
				u64 size,
				u64 mode,
				struct vmci_transport_waiting_info *wait,
				u16 proto,
				struct vmci_handle handle)
{
	struct vmci_transport_packet *pkt;
	struct vsock_sock *vsk;
	int err;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (!vsock_addr_bound(&vsk->remote_addr))
		return -EINVAL;

	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
						&vsk->remote_addr, type, size,
						mode, wait, proto, handle,
						true);
	kfree(pkt);

	return err;
}

static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
					struct sockaddr_vm *src,
					struct vmci_transport_packet *pkt)
{
	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_reset(struct sock *sk,
				     struct vmci_transport_packet *pkt)
{
	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;
	return vmci_transport_send_control_pkt(sk,
					VMCI_TRANSPORT_PACKET_TYPE_RST,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
					   u16 version)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_qp_offer(struct sock *sk,
					struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
					0, NULL,
					VSOCK_PROTO_INVALID, handle);
}

static int vmci_transport_send_attach(struct sock *sk,
				      struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					handle);
}

static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
{
	return vmci_transport_reply_control_pkt_fast(
					pkt,
					VMCI_TRANSPORT_PACKET_TYPE_RST,
					0, 0, NULL,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
					  struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_INVALID,
					0, 0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
				 struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
				struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_write(struct sock *sk,
				      struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_read(struct sock *sk,
				     struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	return vmci_transport_send_control_pkt(
					&vsk->sk,
					VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
					0, mode, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(sk,
					VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
					      u16 version)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}
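
/* The helpers above are thin wrappers: each control message type (RST,
 * NEGOTIATE, OFFER, ATTACH, WROTE, READ, WAITING_*, SHUTDOWN, REQUEST,
 * REQUEST2) is just vmci_transport_send_control_pkt() with the appropriate
 * type and payload.
 */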

static struct sock *vmci_transport_get_pending(
					struct sock *listener,
					struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;
	struct sock *pending;
	struct sockaddr_vm src;

	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);

	vlistener = vsock_sk(listener);

	list_for_each_entry(vpending, &vlistener->pending_links,
			    pending_links) {
		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
		    pkt->dst_port == vpending->local_addr.svm_port) {
			pending = sk_vsock(vpending);
			sock_hold(pending);
			goto found;
		}
	}

	pending = NULL;
found:
	return pending;

}

static void vmci_transport_release_pending(struct sock *pending)
{
	sock_put(pending);
}

/* We allow two kinds of sockets to communicate with a restricted VM: 1)
 * trusted sockets 2) sockets from applications running as the same user as the
 * VM (this is only true for the host side and only when using hosted products)
 */

static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
{
	return vsock->trusted ||
	       vmci_is_context_owner(peer_cid, vsock->owner->uid);
}

/* We allow sending datagrams to and receiving datagrams from a restricted VM
 * only if it is trusted as described in vmci_transport_is_trusted.
 */

static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
{
	if (vsock->cached_peer != peer_cid) {
		vsock->cached_peer = peer_cid;
		if (!vmci_transport_is_trusted(vsock, peer_cid) &&
		    (vmci_context_get_priv_flags(peer_cid) &
		     VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
			vsock->cached_peer_allow_dgram = false;
		} else {
			vsock->cached_peer_allow_dgram = true;
		}
	}

	return vsock->cached_peer_allow_dgram;
}
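
/* The result of the privilege check is cached per peer CID, so
 * vmci_context_get_priv_flags() is only consulted again when a socket starts
 * talking to a different peer.
 */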

static int
vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
				struct vmci_handle *handle,
				u64 produce_size,
				u64 consume_size,
				u32 peer, u32 flags, bool trusted)
{
	int err = 0;

	if (trusted) {
		/* Try to allocate our queue pair as trusted. This will only
		 * work if vsock is running in the host.
		 */

		err = vmci_qpair_alloc(qpair, handle, produce_size,
				       consume_size,
				       peer, flags,
				       VMCI_PRIVILEGE_FLAG_TRUSTED);
		if (err != VMCI_ERROR_NO_ACCESS)
			goto out;

	}

	err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
out:
	if (err < 0) {
		pr_err("Could not attach to queue pair with %d\n",
		       err);
		err = vmci_transport_error_to_vsock_error(err);
	}

	return err;
}

static int
vmci_transport_datagram_create_hnd(u32 resource_id,
				   u32 flags,
				   vmci_datagram_recv_cb recv_cb,
				   void *client_data,
				   struct vmci_handle *out_handle)
{
	int err = 0;

	/* Try to allocate our datagram handler as trusted. This will only work
	 * if vsock is running in the host.
	 */

	err = vmci_datagram_create_handle_priv(resource_id, flags,
					       VMCI_PRIVILEGE_FLAG_TRUSTED,
					       recv_cb,
					       client_data, out_handle);

	if (err == VMCI_ERROR_NO_ACCESS)
		err = vmci_datagram_create_handle(resource_id, flags,
						  recv_cb, client_data,
						  out_handle);

	return err;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context and if it ever needs to
 * sleep it should defer that work to a work queue.
 */

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	size_t size;
	struct sk_buff *skb;
	struct vsock_sock *vsk;

	sk = (struct sock *)data;

	/* This handler is privileged when this module is running on the host.
	 * We will get datagrams from all endpoints (even VMs that are in a
	 * restricted context). If we get one from a restricted context then
	 * the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, dg->src.context))
		return VMCI_ERROR_NO_ACCESS;

	size = VMCI_DG_SIZE(dg);

	/* Attach the packet to the socket's receive queue as an sk_buff. */
	skb = alloc_skb(size, GFP_ATOMIC);
	if (skb) {
		/* sk_receive_skb() will do a sock_put(), so hold here. */
		sock_hold(sk);
		skb_put(skb, size);
		memcpy(skb->data, dg, size);
		sk_receive_skb(sk, skb, 0);
	}

	return VMCI_SUCCESS;
}

static bool vmci_transport_stream_allow(u32 cid, u32 port)
{
	static const u32 non_socket_contexts[] = {
		VMADDR_CID_HYPERVISOR,
		VMADDR_CID_RESERVED,
	};
	int i;

	BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));

	for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
		if (cid == non_socket_contexts[i])
			return false;
	}

	return true;
}
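
/* Stream traffic is only allowed to and from contexts that can host sockets;
 * well-known contexts such as VMADDR_CID_HYPERVISOR are filtered out here.
 */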

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context but it defers most of
 * its work to the packet handling work queue.
 */

static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	struct sockaddr_vm dst;
	struct sockaddr_vm src;
	struct vmci_transport_packet *pkt;
	struct vsock_sock *vsk;
	bool bh_process_pkt;
	int err;

	sk = NULL;
	err = VMCI_SUCCESS;
	bh_process_pkt = false;

	/* Ignore incoming packets from contexts without sockets, or resources
	 * that aren't vsock implementations.
	 */

	if (!vmci_transport_stream_allow(dg->src.context, -1)
	    || VMCI_TRANSPORT_PACKET_RID != dg->src.resource)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
		/* Drop datagrams that do not contain full VSock packets. */
		return VMCI_ERROR_INVALID_ARGS;

	pkt = (struct vmci_transport_packet *)dg;

	/* Find the socket that should handle this packet.  First we look for a
	 * connected socket and if there is none we look for a socket bound to
	 * the destintation address.
	 */
	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
	vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);

	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			/* We could not find a socket for this specified
			 * address.  If this packet is a RST, we just drop it.
			 * If it is another packet, we send a RST.  Note that
			 * we do not send a RST reply to RSTs so that we do not
			 * continually send RSTs between two endpoints.
			 *
			 * Note that since this is a reply, dst is src and src
			 * is dst.
			 */
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NOT_FOUND;
			goto out;
		}
	}

	/* If the received packet type is beyond all types known to this
	 * implementation, reply with an invalid message.  Hopefully this will
	 * help when implementing backwards compatibility in the future.
	 */
	if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
		vmci_transport_send_invalid_bh(&dst, &src);
		err = VMCI_ERROR_INVALID_ARGS;
		goto out;
	}

	/* This handler is privileged when this module is running on the host.
	 * We will get datagram connect requests from all endpoints (even VMs
	 * that are in a restricted context). If we get one from a restricted
	 * context then the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
		err = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	/* We do most everything in a work queue, but let's fast path the
	 * notification of reads and writes to help data transfer performance.
	 * We can only do this if there is no process context code executing
	 * for this socket since that may change the state.
	 */
	bh_lock_sock(sk);

	if (!sock_owned_by_user(sk)) {
		/* The local context ID may be out of date, update it. */
		vsk->local_addr.svm_cid = dst.svm_cid;

		if (sk->sk_state == SS_CONNECTED)
			vmci_trans(vsk)->notify_ops->handle_notify_pkt(
					sk, pkt, true, &dst, &src,
					&bh_process_pkt);
	}

	bh_unlock_sock(sk);

	if (!bh_process_pkt) {
		struct vmci_transport_recv_pkt_info *recv_pkt_info;

		recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
		if (!recv_pkt_info) {
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NO_MEM;
			goto out;
		}

		recv_pkt_info->sk = sk;
		memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
		INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);

		schedule_work(&recv_pkt_info->work);
		/* Clear sk so that the reference count incremented by one of
		 * the Find functions above is not decremented below.  We need
		 * that reference count for the packet handler we've scheduled
		 * to run.
		 */
		sk = NULL;
	}

out:
	if (sk)
		sock_put(sk);

	return err;
}
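
/* Summary of the stream callback above: validate the datagram, find the
 * owning socket, fast-path READ/WROTE notifications when no process-context
 * code owns the socket, and defer everything else to
 * vmci_transport_recv_pkt_work() via schedule_work().
 */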

static void vmci_transport_peer_attach_cb(u32 sub_id,
					  const struct vmci_event_data *e_data,
					  void *client_data)
{
	struct sock *sk = client_data;
	const struct vmci_event_payload_qp *e_payload;
	struct vsock_sock *vsk;

	e_payload = vmci_event_data_const_payload(e_data);

	vsk = vsock_sk(sk);

	/* We don't ask for delayed CBs when we subscribe to this event (we
	 * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
	 * guarantees in that case about what context we might be running in,
	 * so it could be BH or process, blockable or non-blockable.  So we
	 * need to account for all possible contexts here.
	 */
	local_bh_disable();
	bh_lock_sock(sk);

	/* XXX This is lame, we should provide a way to lookup sockets by
	 * qp_handle.
	 */
	if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
				 e_payload->handle)) {
		/* XXX This doesn't do anything, but in the future we may want
		 * to set a flag here to verify the attach really did occur and
		 * we weren't just sent a datagram claiming it was.
		 */
	}

	bh_unlock_sock(sk);
	local_bh_enable();
}

static void vmci_transport_handle_detach(struct sock *sk)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);
	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
		sock_set_flag(sk, SOCK_DONE);

		/* On a detach the peer will not be sending or receiving
		 * anymore.
		 */
		vsk->peer_shutdown = SHUTDOWN_MASK;

		/* We should not be sending anymore since the peer won't be
		 * there to receive, but we can still receive if there is data
		 * left in our consume queue.
		 */
		if (vsock_stream_has_data(vsk) <= 0) {
			if (sk->sk_state == SS_CONNECTING) {
				/* The peer may detach from a queue pair while
				 * we are still in the connecting state, i.e.,
				 * if the peer VM is killed after attaching to
				 * a queue pair, but before we complete the
				 * handshake. In that case, we treat the detach
				 * event like a reset.
				 */

				sk->sk_state = SS_UNCONNECTED;
				sk->sk_err = ECONNRESET;
				sk->sk_error_report(sk);
				return;
			}
			sk->sk_state = SS_UNCONNECTED;
		}
		sk->sk_state_change(sk);
	}
}

static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *e_data,
					  void *client_data)
{
	struct sock *sk = client_data;
	const struct vmci_event_payload_qp *e_payload;
	struct vsock_sock *vsk;

	e_payload = vmci_event_data_const_payload(e_data);
	vsk = vsock_sk(sk);
	if (vmci_handle_is_invalid(e_payload->handle))
		return;

	/* Same rules for locking as for peer_attach_cb(). */
	local_bh_disable();
	bh_lock_sock(sk);

	/* XXX This is lame, we should provide a way to lookup sockets by
	 * qp_handle.
	 */
	if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
				 e_payload->handle))
		vmci_transport_handle_detach(sk);

	bh_unlock_sock(sk);
	local_bh_enable();
}

static void vmci_transport_qp_resumed_cb(u32 sub_id,
					 const struct vmci_event_data *e_data,
					 void *client_data)
{
	vsock_for_each_connected_socket(vmci_transport_handle_detach);
}

static void vmci_transport_recv_pkt_work(struct work_struct *work)
{
	struct vmci_transport_recv_pkt_info *recv_pkt_info;
	struct vmci_transport_packet *pkt;
	struct sock *sk;

	recv_pkt_info =
		container_of(work, struct vmci_transport_recv_pkt_info, work);
	sk = recv_pkt_info->sk;
	pkt = &recv_pkt_info->pkt;

	lock_sock(sk);

	/* The local context ID may be out of date. */
	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;

	switch (sk->sk_state) {
	case SS_LISTEN:
		vmci_transport_recv_listen(sk, pkt);
		break;
	case SS_CONNECTING:
		/* Processing of pending connections for servers goes through
		 * the listening socket, so see vmci_transport_recv_listen()
		 * for that path.
		 */
		vmci_transport_recv_connecting_client(sk, pkt);
		break;
	case SS_CONNECTED:
		vmci_transport_recv_connected(sk, pkt);
		break;
	default:
		/* Because this function does not run in the same context as
		 * vmci_transport_recv_stream_cb it is possible that the
		 * socket has closed. We need to let the other side know or it
		 * could be sitting in a connect and hang forever. Send a
		 * reset to prevent that.
		 */
		vmci_transport_send_reset(sk, pkt);
		goto out;
	}

out:
	release_sock(sk);
	kfree(recv_pkt_info);
	/* Release reference obtained in the stream callback when we fetched
	 * this socket out of the bound or connected list.
	 */
	sock_put(sk);
}
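
/* The work handler above is the slow path for every control packet: it takes
 * the socket lock and dispatches on sk_state, so the LISTEN, CONNECTING and
 * CONNECTED handlers below always run in process context with the socket
 * locked.
 */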

static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct sock *pending;
	struct vsock_sock *vpending;
	int err;
	u64 qp_size;
	bool old_request = false;
	bool old_pkt_proto = false;

	err = 0;

	/* Because we are in the listen state, we could be receiving a packet
	 * for ourself or any previous connection requests that we received.
	 * If it's the latter, we try to find a socket in our list of pending
	 * connections and, if we do, call the appropriate handler for the
	 * state that that socket is in.  Otherwise we try to service the
	 * connection request.
	 */
	pending = vmci_transport_get_pending(sk, pkt);
	if (pending) {
		lock_sock(pending);

		/* The local context ID may be out of date. */
		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;

		switch (pending->sk_state) {
		case SS_CONNECTING:
			err = vmci_transport_recv_connecting_server(sk,
								    pending,
								    pkt);
			break;
		default:
			vmci_transport_send_reset(pending, pkt);
			err = -EINVAL;
		}

		if (err < 0)
			vsock_remove_pending(sk, pending);

		release_sock(pending);
		vmci_transport_release_pending(pending);

		return err;
	}

	/* The listen state only accepts connection requests.  Reply with a
	 * reset unless we received a reset.
	 */

	if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
	      pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	if (pkt->u.size == 0) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	/* If this socket can't accommodate this connection request, we send a
	 * reset.  Otherwise we create and initialize a child socket and reply
	 * with a connection negotiation.
	 */
	if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
		vmci_transport_reply_reset(pkt);
		return -ECONNREFUSED;
	}

	pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
				 sk->sk_type);
	if (!pending) {
		vmci_transport_send_reset(sk, pkt);
		return -ENOMEM;
	}

	vpending = vsock_sk(pending);

	vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
			pkt->dst_port);
	vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
			pkt->src_port);

	/* If the proposed size fits within our min/max, accept it. Otherwise
	 * propose our own size.
	 */
	if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
	    pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
		qp_size = pkt->u.size;
	} else {
		qp_size = vmci_trans(vpending)->queue_pair_size;
	}

	/* Figure out if we are using old or new requests based on the
	 * overrides pkt types sent by our peer.
	 */
	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_request = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
			old_request = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
			old_request = false;

	}

	if (old_request) {
		/* Handle a REQUEST (or override) */
		u16 version = VSOCK_PROTO_INVALID;
		if (vmci_transport_proto_to_notify_struct(
			pending, &version, true))
			err = vmci_transport_send_negotiate(pending, qp_size);
		else
			err = -EINVAL;

	} else {
		/* Handle a REQUEST2 (or override) */
		int proto_int = pkt->proto;
		int pos;
		u16 active_proto_version = 0;

		/* The list of possible protocols is the intersection of all
		 * protocols the client supports ... plus all the protocols we
		 * support.
		 */
		proto_int &= vmci_transport_new_proto_supported_versions();

		/* We choose the highest possible protocol version and use that
		 * one.
		 */
		pos = fls(proto_int);
		if (pos) {
			active_proto_version = (1 << (pos - 1));
			if (vmci_transport_proto_to_notify_struct(
				pending, &active_proto_version, false))
				err = vmci_transport_send_negotiate2(pending,
							qp_size,
							active_proto_version);
			else
				err = -EINVAL;
		} else {
			err = -EINVAL;
		}
	}

	if (err < 0) {
		vmci_transport_send_reset(sk, pkt);
		sock_put(pending);
		err = vmci_transport_error_to_vsock_error(err);
		goto out;
	}

	vsock_add_pending(sk, pending);
	sk->sk_ack_backlog++;

	pending->sk_state = SS_CONNECTING;
	vmci_trans(vpending)->produce_size =
		vmci_trans(vpending)->consume_size = qp_size;
	vmci_trans(vpending)->queue_pair_size = qp_size;

	vmci_trans(vpending)->notify_ops->process_request(pending);

	/* We might never receive another message for this socket and it's not
	 * connected to any process, so we have to ensure it gets cleaned up
	 * ourself.  Our delayed work function will take care of that.  Note
	 * that we do not ever cancel this function since we have few
	 * guarantees about its state when calling cancel_delayed_work().
	 * Instead we hold a reference on the socket for that function and make
	 * it capable of handling cases where it needs to do nothing but
	 * release that reference.
	 */
	vpending->listener = sk;
	sock_hold(sk);
	sock_hold(pending);
	INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
	schedule_delayed_work(&vpending->dwork, HZ);

out:
	return err;
}

static int
vmci_transport_recv_connecting_server(struct sock *listener,
				      struct sock *pending,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vpending;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	bool is_local;
	u32 flags;
	u32 detach_sub_id;
	int err;
	int skerr;

	vpending = vsock_sk(pending);
	detach_sub_id = VMCI_INVALID_ID;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
		if (vmci_handle_is_invalid(pkt->u.handle)) {
			vmci_transport_send_reset(pending, pkt);
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}
		break;
	default:
		/* Close and cleanup the connection. */
		vmci_transport_send_reset(pending, pkt);
		skerr = EPROTO;
		err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
		goto destroy;
	}

	/* In order to complete the connection we need to attach to the offered
	 * queue pair and send an attach notification.  We also subscribe to the
	 * detach event so we know when our peer goes away, and we do that
	 * before attaching so we don't miss an event.  If all this succeeds,
	 * we update our state and wakeup anything waiting in accept() for a
	 * connection.
	 */

	/* We don't care about attach since we ensure the other side has
	 * attached by specifying the ATTACH_ONLY flag below.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   pending, &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->detach_sub_id = detach_sub_id;

	/* Now attach to the queue pair the client created. */
	handle = pkt->u.handle;

	/* vpending->local_addr always has a context id so we do not need to
	 * worry about VMADDR_CID_ANY in this case.
	 */
	is_local =
	    vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
	flags = VMCI_QPFLAG_ATTACH_ONLY;
	flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(
					&qpair,
					&handle,
					vmci_trans(vpending)->produce_size,
					vmci_trans(vpending)->consume_size,
					pkt->dg.src.context,
					flags,
					vmci_transport_is_trusted(
						vpending,
						vpending->remote_addr.svm_cid));
	if (err < 0) {
		vmci_transport_send_reset(pending, pkt);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->qp_handle = handle;
	vmci_trans(vpending)->qpair = qpair;

	/* When we send the attach message, we must be ready to handle incoming
	 * control messages on the newly connected socket. So we move the
	 * pending socket to the connected state before sending the attach
	 * message. Otherwise, an incoming packet triggered by the attach being
	 * received by the peer may be processed concurrently with what happens
	 * below after sending the attach message, and that incoming packet
	 * will find the listening socket instead of the (currently) pending
	 * socket. Note that enqueueing the socket increments the reference
	 * count, so even if a reset comes before the connection is accepted,
	 * the socket will be valid until it is removed from the queue.
	 *
	 * If we fail sending the attach below, we remove the socket from the
	 * connected list and move the socket to SS_UNCONNECTED before
	 * releasing the lock, so a pending slow path processing of an incoming
	 * packet will not see the socket in the connected state in that case.
	 */
	pending->sk_state = SS_CONNECTED;

	vsock_insert_connected(vpending);

	/* Notify our peer of our attach. */
	err = vmci_transport_send_attach(pending, handle);
	if (err < 0) {
		vsock_remove_connected(vpending);
		pr_err("Could not send attach\n");
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	/* We have a connection. Move the now connected socket from the
	 * listener's pending list to the accept queue so callers of accept()
	 * can find it.
	 */
	vsock_remove_pending(listener, pending);
	vsock_enqueue_accept(listener, pending);

	/* Callers of accept() will be be waiting on the listening socket, not
	 * the pending socket.
	 */
	listener->sk_state_change(listener);

	return 0;

destroy:
	pending->sk_err = skerr;
	pending->sk_state = SS_UNCONNECTED;
	/* As long as we drop our reference, all necessary cleanup will handle
	 * when the cleanup function drops its reference and our destruct
	 * implementation is called.  Note that since the listen handler will
	 * remove pending from the pending list upon our failure, the cleanup
	 * function won't drop the additional reference, which is why we do it
	 * here.
	 */
	sock_put(pending);

	return err;
}

static int
vmci_transport_recv_connecting_client(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	int err;
	int skerr;

	vsk = vsock_sk(sk);

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		if (vmci_handle_is_invalid(pkt->u.handle) ||
		    !vmci_handle_is_equal(pkt->u.handle,
					  vmci_trans(vsk)->qp_handle)) {
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}

		/* Signify the socket is connected and wakeup the waiter in
		 * connect(). Also place the socket in the connected table for
		 * accounting (it can already be found since it's in the bound
		 * table).
		 */
		sk->sk_state = SS_CONNECTED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		if (pkt->u.size == 0
		    || pkt->dg.src.context != vsk->remote_addr.svm_cid
		    || pkt->src_port != vsk->remote_addr.svm_port
		    || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
		    || vmci_trans(vsk)->qpair
		    || vmci_trans(vsk)->produce_size != 0
		    || vmci_trans(vsk)->consume_size != 0
		    || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
		    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
			skerr = EPROTO;
			err = -EINVAL;

			goto destroy;
		}

		err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		/* Older versions of the linux code (WS 6.5 / ESX 4.0) used to
		 * continue processing here after they sent an INVALID packet.
		 * This meant that we got a RST after the INVALID. We ignore a
		 * RST after an INVALID. The common code doesn't send the RST
		 * ... so we can hang if an old version of the common code
		 * fails between getting a REQUEST and sending an OFFER back.
		 * Not much we can do about it... except hope that it doesn't
		 * happen.
		 */
		if (vsk->ignore_connecting_rst) {
			vsk->ignore_connecting_rst = false;
		} else {
			skerr = ECONNRESET;
			err = 0;
			goto destroy;
		}

		break;
	default:
		/* Close and cleanup the connection. */
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}

	return 0;

destroy:
	vmci_transport_send_reset(sk, pkt);

	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = skerr;
	sk->sk_error_report(sk);
	return err;
}

static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt)
{
	int err;
	struct vsock_sock *vsk;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	u32 attach_sub_id;
	u32 detach_sub_id;
	bool is_local;
	u32 flags;
	bool old_proto = true;
	bool old_pkt_proto;
	u16 version;

	vsk = vsock_sk(sk);
	handle = VMCI_INVALID_HANDLE;
	attach_sub_id = VMCI_INVALID_ID;
	detach_sub_id = VMCI_INVALID_ID;

	/* If we have gotten here then we should be past the point where old
	 * linux vsock could have sent the bogus rst.
	 */
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;

	/* Verify that we're OK with the proposed queue pair size */
	if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
	    pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
		err = -EINVAL;
		goto destroy;
	}

	/* At this point we know the CID the peer is using to talk to us. */

	if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = pkt->dg.dst.context;

	/* Setup the notify ops to be the highest supported version that both
	 * the server and the client support.
	 */

	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_proto = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
			old_proto = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
			old_proto = false;

	}

	if (old_proto)
		version = VSOCK_PROTO_INVALID;
	else
		version = pkt->proto;

	if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
		err = -EINVAL;
		goto destroy;
	}

	/* Subscribe to attach and detach events first.
	 *
	 * XXX We attach once for each queue pair created for now so it is easy
	 * to find the socket (it's provided), but later we should only
	 * subscribe once and add a way to lookup sockets by queue pair handle.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
				   vmci_transport_peer_attach_cb,
				   sk, &attach_sub_id);
	if (err < VMCI_SUCCESS) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   sk, &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	/* Make VMCI select the handle for us. */
	handle = VMCI_INVALID_HANDLE;
	is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
	flags = is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(&qpair,
					      &handle,
					      pkt->u.size,
					      pkt->u.size,
					      vsk->remote_addr.svm_cid,
					      flags,
					      vmci_transport_is_trusted(
						      vsk,
						      vsk->remote_addr.svm_cid));
	if (err < 0)
		goto destroy;

	err = vmci_transport_send_qp_offer(sk, handle);
	if (err < 0) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	vmci_trans(vsk)->qp_handle = handle;
	vmci_trans(vsk)->qpair = qpair;

	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
		pkt->u.size;

	vmci_trans(vsk)->attach_sub_id = attach_sub_id;
	vmci_trans(vsk)->detach_sub_id = detach_sub_id;

	vmci_trans(vsk)->notify_ops->process_negotiate(sk);

	return 0;

destroy:
	if (attach_sub_id != VMCI_INVALID_ID)
		vmci_event_unsubscribe(attach_sub_id);

	if (detach_sub_id != VMCI_INVALID_ID)
		vmci_event_unsubscribe(detach_sub_id);

	if (!vmci_handle_is_invalid(handle))
		vmci_qpair_detach(&qpair);

	return err;
}

static int
vmci_transport_recv_connecting_client_invalid(struct sock *sk,
					      struct vmci_transport_packet *pkt)
{
	int err = 0;
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsk->sent_request) {
		vsk->sent_request = false;
		vsk->ignore_connecting_rst = true;

		err = vmci_transport_send_conn_request(
			sk, vmci_trans(vsk)->queue_pair_size);
		if (err < 0)
			err = vmci_transport_error_to_vsock_error(err);
		else
			err = 0;

	}

	return err;
}

static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	bool pkt_processed = false;

	/* In cases where we are closing the connection, it's sufficient to
	 * mark the state change (and maybe error) and wake up any waiting
	 * threads. Since this is a connected socket, it's owned by a user
	 * process and will be cleaned up when the failure is passed back on
	 * the current or next system call.  Our system call implementations
	 * must therefore check for error and state changes on entry and when
	 * being awoken.
	 */
	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		if (pkt->u.mode) {
			vsk = vsock_sk(sk);

			vsk->peer_shutdown |= pkt->u.mode;
			sk->sk_state_change(sk);
		}
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		vsk = vsock_sk(sk);
		/* It is possible that we sent our peer a message (e.g a
		 * WAITING_READ) right before we got notified that the peer had
		 * detached. If that happens then we can get a RST pkt back
		 * from our peer even though there is data available for us to
		 * read. In that case, don't shutdown the socket completely but
		 * instead allow the local client to finish reading data off
		 * the queuepair. Always treat a RST pkt in connected mode like
		 * a clean shutdown.
		 */
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		if (vsock_stream_has_data(vsk) <= 0)
			sk->sk_state = SS_DISCONNECTING;

		sk->sk_state_change(sk);
		break;

	default:
		vsk = vsock_sk(sk);
		vmci_trans(vsk)->notify_ops->handle_notify_pkt(
				sk, pkt, false, NULL, NULL,
				&pkt_processed);
		if (!pkt_processed)
			return -EINVAL;

		break;
	}

	return 0;
}

static int vmci_transport_socket_init(struct vsock_sock *vsk,
				      struct vsock_sock *psk)
{
	vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
	if (!vsk->trans)
		return -ENOMEM;

	vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qpair = NULL;
	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
	vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
		VMCI_INVALID_ID;
	vmci_trans(vsk)->notify_ops = NULL;
	if (psk) {
		vmci_trans(vsk)->queue_pair_size =
			vmci_trans(psk)->queue_pair_size;
		vmci_trans(vsk)->queue_pair_min_size =
			vmci_trans(psk)->queue_pair_min_size;
		vmci_trans(vsk)->queue_pair_max_size =
			vmci_trans(psk)->queue_pair_max_size;
	} else {
		vmci_trans(vsk)->queue_pair_size =
			VMCI_TRANSPORT_DEFAULT_QP_SIZE;
		vmci_trans(vsk)->queue_pair_min_size =
			VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
		vmci_trans(vsk)->queue_pair_max_size =
			VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
	}

	return 0;
}

static void vmci_transport_destruct(struct vsock_sock *vsk)
{
	if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
		vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
		vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
	}

	if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
		vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
		vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
	}

	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
		vmci_qpair_detach(&vmci_trans(vsk)->qpair);
		vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
		vmci_trans(vsk)->produce_size = 0;
		vmci_trans(vsk)->consume_size = 0;
	}

	if (vmci_trans(vsk)->notify_ops)
		vmci_trans(vsk)->notify_ops->socket_destruct(vsk);

	kfree(vsk->trans);
	vsk->trans = NULL;
}

static void vmci_transport_release(struct vsock_sock *vsk)
{
	if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
		vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
		vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
	}
}

static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
				     struct sockaddr_vm *addr)
{
	u32 port;
	u32 flags;
	int err;

	/* VMCI will select a resource ID for us if we provide
	 * VMCI_INVALID_ID.
	 */
	port = addr->svm_port == VMADDR_PORT_ANY ?
			VMCI_INVALID_ID : addr->svm_port;

	if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	flags = addr->svm_cid == VMADDR_CID_ANY ?
		VMCI_FLAG_ANYCID_DG_HND : 0;

	err = vmci_transport_datagram_create_hnd(port, flags,
						 vmci_transport_recv_dgram_cb,
						 &vsk->sk,
						 &vmci_trans(vsk)->dg_handle);
	if (err < VMCI_SUCCESS)
		return vmci_transport_error_to_vsock_error(err);
	vsock_addr_init(&vsk->local_addr, addr->svm_cid,
			vmci_trans(vsk)->dg_handle.resource);

	return 0;
}

static int vmci_transport_dgram_enqueue(
	struct vsock_sock *vsk,
	struct sockaddr_vm *remote_addr,
	struct iovec *iov,
	size_t len)
{
	int err;
	struct vmci_datagram *dg;

	if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
		return -EMSGSIZE;

	if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
		return -EPERM;

	/* Allocate a buffer for the user's message and our packet header. */
	dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
	if (!dg)
		return -ENOMEM;

	memcpy_fromiovec(VMCI_DG_PAYLOAD(dg), iov, len);

	dg->dst = vmci_make_handle(remote_addr->svm_cid,
				   remote_addr->svm_port);
	dg->src = vmci_make_handle(vsk->local_addr.svm_cid,
				   vsk->local_addr.svm_port);
	dg->payload_size = len;

	err = vmci_datagram_send(dg);
	kfree(dg);
	if (err < 0)
		return vmci_transport_error_to_vsock_error(err);

	return err - sizeof(*dg);
}

static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
					struct vsock_sock *vsk,
					struct msghdr *msg, size_t len,
					int flags)
{
	int err;
	int noblock;
	struct vmci_datagram *dg;
	size_t payload_len;
	struct sk_buff *skb;

	noblock = flags & MSG_DONTWAIT;

	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
		return -EOPNOTSUPP;

	/* Retrieve the head sk_buff from the socket's receive queue. */
	err = 0;
	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
	if (err)
		return err;

	if (!skb)
		return -EAGAIN;

	dg = (struct vmci_datagram *)skb->data;
	if (!dg)
		/* err is 0, meaning we read zero bytes. */
		goto out;

	payload_len = dg->payload_size;
	/* Ensure the sk_buff matches the payload size claimed in the packet. */
	if (payload_len != skb->len - sizeof(*dg)) {
		err = -EINVAL;
		goto out;
	}

	if (payload_len > len) {
		payload_len = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Place the datagram payload in the user's iovec. */
	err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
				      payload_len);
	if (err)
		goto out;

	msg->msg_namelen = 0;
	if (msg->msg_name) {
		struct sockaddr_vm *vm_addr;

		/* Provide the address of the sender. */
		vm_addr = (struct sockaddr_vm *)msg->msg_name;
		vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
		msg->msg_namelen = sizeof(*vm_addr);
	}
	err = payload_len;

out:
	skb_free_datagram(&vsk->sk, skb);
	return err;
}

static bool vmci_transport_dgram_allow(u32 cid, u32 port)
{
	if (cid == VMADDR_CID_HYPERVISOR) {
		/* Registrations of PBRPC Servers do not modify VMX/Hypervisor
		 * state and are allowed.
		 */
		return port == VMCI_UNITY_PBRPC_REGISTER;
	}

	return true;
}

static int vmci_transport_connect(struct vsock_sock *vsk)
{
	int err;
	bool old_pkt_proto = false;
	struct sock *sk = &vsk->sk;

	if (vmci_transport_old_proto_override(&old_pkt_proto) &&
		old_pkt_proto) {
		err = vmci_transport_send_conn_request(
			sk, vmci_trans(vsk)->queue_pair_size);
		if (err < 0) {
			sk->sk_state = SS_UNCONNECTED;
			return err;
		}
	} else {
		int supported_proto_versions =
			vmci_transport_new_proto_supported_versions();
		err = vmci_transport_send_conn_request2(
				sk, vmci_trans(vsk)->queue_pair_size,
				supported_proto_versions);
		if (err < 0) {
			sk->sk_state = SS_UNCONNECTED;
			return err;
		}

		vsk->sent_request = true;
	}

	return err;
}

static ssize_t vmci_transport_stream_dequeue(
	struct vsock_sock *vsk,
	struct iovec *iov,
	size_t len,
	int flags)
{
	if (flags & MSG_PEEK)
		return vmci_qpair_peekv(vmci_trans(vsk)->qpair, iov, len, 0);
	else
		return vmci_qpair_dequev(vmci_trans(vsk)->qpair, iov, len, 0);
}

static ssize_t vmci_transport_stream_enqueue(
	struct vsock_sock *vsk,
	struct iovec *iov,
	size_t len)
{
	return vmci_qpair_enquev(vmci_trans(vsk)->qpair, iov, len, 0);
}

static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
{
	return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair);
}

static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk)
{
	return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair);
}

static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->consume_size;
}

static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
{
	return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
}

static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->queue_pair_size;
}

static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->queue_pair_min_size;
}

static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->queue_pair_max_size;
}

static void vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
{
	if (val < vmci_trans(vsk)->queue_pair_min_size)
		vmci_trans(vsk)->queue_pair_min_size = val;
	if (val > vmci_trans(vsk)->queue_pair_max_size)
		vmci_trans(vsk)->queue_pair_max_size = val;
	vmci_trans(vsk)->queue_pair_size = val;
}

static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk,
					       u64 val)
{
	if (val > vmci_trans(vsk)->queue_pair_size)
		vmci_trans(vsk)->queue_pair_size = val;
	vmci_trans(vsk)->queue_pair_min_size = val;
}

static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk,
					       u64 val)
{
	if (val < vmci_trans(vsk)->queue_pair_size)
		vmci_trans(vsk)->queue_pair_size = val;
	vmci_trans(vsk)->queue_pair_max_size = val;
}
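
/* The setters above keep the invariant min <= size <= max: setting the
 * current size widens the min/max bounds if needed, and moving a bound drags
 * the current size along with it.
 */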

static int vmci_transport_notify_poll_in(
	struct vsock_sock *vsk,
	size_t target,
	bool *data_ready_now)
{
	return vmci_trans(vsk)->notify_ops->poll_in(
			&vsk->sk, target, data_ready_now);
}

static int vmci_transport_notify_poll_out(
	struct vsock_sock *vsk,
	size_t target,
	bool *space_available_now)
{
	return vmci_trans(vsk)->notify_ops->poll_out(
			&vsk->sk, target, space_available_now);
}

static int vmci_transport_notify_recv_init(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_init(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_pre_block(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_pre_block(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_pre_dequeue(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_pre_dequeue(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_post_dequeue(
	struct vsock_sock *vsk,
	size_t target,
	ssize_t copied,
	bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_post_dequeue(
			&vsk->sk, target, copied, data_read,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_send_init(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_init(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_pre_block(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_pre_block(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_pre_enqueue(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_pre_enqueue(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_post_enqueue(
	struct vsock_sock *vsk,
	ssize_t written,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_post_enqueue(
			&vsk->sk, written,
			(struct vmci_transport_send_notify_data *)data);
}

static bool vmci_transport_old_proto_override(bool *old_pkt_proto)
{
	if (PROTOCOL_OVERRIDE != -1) {
		if (PROTOCOL_OVERRIDE == 0)
			*old_pkt_proto = true;
		else
			*old_pkt_proto = false;

		pr_info("Proto override in use\n");
		return true;
	}

	return false;
}

static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
						  u16 *proto,
						  bool old_pkt_proto)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (old_pkt_proto) {
		if (*proto != VSOCK_PROTO_INVALID) {
			pr_err("Can't set both an old and new protocol\n");
			return false;
		}
		vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops;
		goto exit;
	}

	switch (*proto) {
	case VSOCK_PROTO_PKT_ON_NOTIFY:
		vmci_trans(vsk)->notify_ops =
			&vmci_transport_notify_pkt_q_state_ops;
		break;
	default:
		pr_err("Unknown notify protocol version\n");
		return false;
	}

exit:
	vmci_trans(vsk)->notify_ops->socket_init(sk);
	return true;
}

static u16 vmci_transport_new_proto_supported_versions(void)
{
	if (PROTOCOL_OVERRIDE != -1)
		return PROTOCOL_OVERRIDE;

	return VSOCK_PROTO_ALL_SUPPORTED;
}

static u32 vmci_transport_get_local_cid(void)
{
	return vmci_get_context_id();
}

static struct vsock_transport vmci_transport = {
	.init = vmci_transport_socket_init,
	.destruct = vmci_transport_destruct,
	.release = vmci_transport_release,
	.connect = vmci_transport_connect,
	.dgram_bind = vmci_transport_dgram_bind,
	.dgram_dequeue = vmci_transport_dgram_dequeue,
	.dgram_enqueue = vmci_transport_dgram_enqueue,
	.dgram_allow = vmci_transport_dgram_allow,
	.stream_dequeue = vmci_transport_stream_dequeue,
	.stream_enqueue = vmci_transport_stream_enqueue,
	.stream_has_data = vmci_transport_stream_has_data,
	.stream_has_space = vmci_transport_stream_has_space,
	.stream_rcvhiwat = vmci_transport_stream_rcvhiwat,
	.stream_is_active = vmci_transport_stream_is_active,
	.stream_allow = vmci_transport_stream_allow,
	.notify_poll_in = vmci_transport_notify_poll_in,
	.notify_poll_out = vmci_transport_notify_poll_out,
	.notify_recv_init = vmci_transport_notify_recv_init,
	.notify_recv_pre_block = vmci_transport_notify_recv_pre_block,
	.notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue,
	.notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue,
	.notify_send_init = vmci_transport_notify_send_init,
	.notify_send_pre_block = vmci_transport_notify_send_pre_block,
	.notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
	.notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
	.shutdown = vmci_transport_shutdown,
	.set_buffer_size = vmci_transport_set_buffer_size,
	.set_min_buffer_size = vmci_transport_set_min_buffer_size,
	.set_max_buffer_size = vmci_transport_set_max_buffer_size,
	.get_buffer_size = vmci_transport_get_buffer_size,
	.get_min_buffer_size = vmci_transport_get_min_buffer_size,
	.get_max_buffer_size = vmci_transport_get_max_buffer_size,
	.get_local_cid = vmci_transport_get_local_cid,
};
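
/* This table is what vsock_core_init() registers in vmci_transport_init()
 * below; the address family code in af_vsock calls back through it for every
 * AF_VSOCK socket operation.
 */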

static int __init vmci_transport_init(void)
{
	int err;

	/* Create the datagram handle that we will use to send and receive all
	 * VSocket control messages for this context.
	 */
	err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
						 VMCI_FLAG_ANYCID_DG_HND,
						 vmci_transport_recv_stream_cb,
						 NULL,
						 &vmci_transport_stream_handle);
	if (err < VMCI_SUCCESS) {
		pr_err("Unable to create datagram handle. (%d)\n", err);
		return vmci_transport_error_to_vsock_error(err);
	}

	err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
				   vmci_transport_qp_resumed_cb,
				   NULL, &vmci_transport_qp_resumed_sub_id);
	if (err < VMCI_SUCCESS) {
		pr_err("Unable to subscribe to resumed event. (%d)\n", err);
		err = vmci_transport_error_to_vsock_error(err);
		vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
		goto err_destroy_stream_handle;
	}

	err = vsock_core_init(&vmci_transport);
	if (err < 0)
		goto err_unsubscribe;

	return 0;

err_unsubscribe:
	vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
err_destroy_stream_handle:
	vmci_datagram_destroy_handle(vmci_transport_stream_handle);
	return err;
}
module_init(vmci_transport_init);

static void __exit vmci_transport_exit(void)
{
	if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
		if (vmci_datagram_destroy_handle(
			vmci_transport_stream_handle) != VMCI_SUCCESS)
			pr_err("Couldn't destroy datagram handle\n");
		vmci_transport_stream_handle = VMCI_INVALID_HANDLE;
	}

	if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) {
		vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
		vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
	}

	vsock_core_exit();
}
module_exit(vmci_transport_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);