hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}
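+/*
+ * Invoked via the transport's on_read callback once the daemon has read
+ * (or, for netlink, been sent) our handshake reply; only then do we
+ * start polling the channel.
+ */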
+static void fcopy_register_done(void)
+{
+ pr_debug("FCP: userspace daemon registered\n");
+ hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
+}
+
static int fcopy_handle_handshake(u32 version)
{
u32 our_ver = FCOPY_CURRENT_VERSION;
break;
case FCOPY_VERSION_1:
/* Daemon expects us to reply with our own version */
- if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
+ if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
+ fcopy_register_done))
return -EFAULT;
dm_reg_value = version;
break;
*/
return -EINVAL;
}
- pr_debug("FCP: userspace daemon ver. %d registered\n", version);
- hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
+ pr_debug("FCP: userspace daemon ver. %d connected\n", version);
return 0;
}
}
fcopy_transaction.state = HVUTIL_USERSPACE_REQ;
- rc = hvutil_transport_send(hvt, out_src, out_len);
+ rc = hvutil_transport_send(hvt, out_src, out_len, NULL);
if (rc) {
pr_debug("FCP: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
hv_kvp_onchannelcallback(channel);
}
+static void kvp_register_done(void)
+{
+ /*
+ * If we're still negotiating with the host, cancel the timeout
+ * work so that we don't poll the channel twice.
+ */
+ pr_debug("KVP: userspace daemon registered\n");
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+}
+
static void
kvp_register(int reg_value)
{
kvp_msg->kvp_hdr.operation = reg_value;
strcpy(version, HV_DRV_VERSION);
- hvutil_transport_send(hvt, kvp_msg, sizeof(*kvp_msg));
+ hvutil_transport_send(hvt, kvp_msg, sizeof(*kvp_msg),
+ kvp_register_done);
kfree(kvp_msg);
}
}
/*
* We have a compatible daemon; complete the handshake.
*/
- pr_debug("KVP: userspace daemon ver. %d registered\n",
- KVP_OP_REGISTER);
+ pr_debug("KVP: userspace daemon ver. %d connected\n",
+ msg->kvp_hdr.operation);
kvp_register(dm_reg_value);
- /*
- * If we're still negotiating with the host cancel the timeout
- * work to not poll the channel twice.
- */
- cancel_delayed_work_sync(&kvp_host_handshake_work);
- hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
-
return 0;
}
}
kvp_transaction.state = HVUTIL_USERSPACE_REQ;
- rc = hvutil_transport_send(hvt, message, sizeof(*message));
+ rc = hvutil_transport_send(hvt, message, sizeof(*message), NULL);
if (rc) {
pr_debug("KVP: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&kvp_timeout_work)) {
hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}
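+/* on_read callback: start polling the channel once the daemon has our reply */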
+static void vss_register_done(void)
+{
+ pr_debug("VSS: userspace daemon registered\n");
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
+}
+
static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
{
u32 our_ver = VSS_OP_REGISTER1;
dm_reg_value = VSS_OP_REGISTER;
break;
case VSS_OP_REGISTER1:
- /* Daemon expects us to reply with our own version*/
- if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
+ /* Daemon expects us to reply with our own version */
+ if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
+ vss_register_done))
return -EFAULT;
dm_reg_value = VSS_OP_REGISTER1;
break;
default:
return -EINVAL;
}
- hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
- pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
+ pr_debug("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
return 0;
}
vss_msg->vss_hdr.operation = op;
vss_transaction.state = HVUTIL_USERSPACE_REQ;
- rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg));
+ rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
if (cancel_delayed_work_sync(&vss_timeout_work)) {
hvt->outmsg = NULL;
hvt->outmsg_len = 0;
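+ /*
+ * The daemon has consumed the outgoing message; fire the one-shot
+ * on_read callback registered by the sender.
+ */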
+ if (hvt->on_read)
+ hvt->on_read();
+ hvt->on_read = NULL;
+
out_unlock:
mutex_unlock(&hvt->lock);
return ret;
mutex_unlock(&hvt->lock);
}
-int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
+int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
+ void (*on_read_cb)(void))
{
struct cn_msg *cn_msg;
int ret = 0;
memcpy(cn_msg->data, msg, len);
ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
kfree(cn_msg);
+ /*
+ * We don't know when netlink messages are delivered, but unlike
+ * in CHARDEV mode we're not blocked here and can send the next
+ * message right away.
+ */
+ if (on_read_cb)
+ on_read_cb();
return ret;
}
/* HVUTIL_TRANSPORT_CHARDEV */
if (hvt->outmsg) {
memcpy(hvt->outmsg, msg, len);
hvt->outmsg_len = len;
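+ /* In CHARDEV mode, defer the callback until the daemon reads the message */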
+ hvt->on_read = on_read_cb;
wake_up_interruptible(&hvt->outmsg_q);
} else
ret = -ENOMEM;
struct list_head list; /* hvt_list */
int (*on_msg)(void *, int); /* callback on new user message */
void (*on_reset)(void); /* callback when userspace drops */
+ void (*on_read)(void); /* callback on message read */
u8 *outmsg; /* message to the userspace */
int outmsg_len; /* its length */
wait_queue_head_t outmsg_q; /* poll/read wait queue */
u32 cn_idx, u32 cn_val,
int (*on_msg)(void *, int),
void (*on_reset)(void));
-int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len);
+int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
+ void (*on_read_cb)(void));
void hvutil_transport_destroy(struct hvutil_transport *hvt);
#endif /* _HV_UTILS_TRANSPORT_H */