greybus: operation: use per-connection work queues
author: Johan Hovold <johan@hovoldconsulting.com>
Thu, 23 Jul 2015 08:50:02 +0000 (10:50 +0200)
committer: Greg Kroah-Hartman <gregkh@google.com>
Thu, 23 Jul 2015 19:55:25 +0000 (12:55 -0700)
Replace the global operation work queue with per-connection work queues.

There is no need to keep operations strictly ordered across connections;
doing so only adds unnecessary latency.

Tested-by: Rui Miguel Silva <rui.silva@linaro.org>
Signed-off-by: Johan Hovold <johan@hovoldconsulting.com>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
drivers/staging/greybus/connection.c
drivers/staging/greybus/connection.h
drivers/staging/greybus/operation.c

index 555625c3fcef269d5b855aba61027bb5256ebf81..b88abed2e1ad5cf4e50db6d35f1a5a42b25cb729 100644 (file)
@@ -7,6 +7,8 @@
  * Released under the GPLv2 only.
  */
 
+#include <linux/workqueue.h>
+
 #include "greybus.h"
 
 static DEFINE_SPINLOCK(gb_connections_lock);
@@ -99,6 +101,7 @@ static void gb_connection_release(struct device *dev)
 {
        struct gb_connection *connection = to_gb_connection(dev);
 
+       destroy_workqueue(connection->wq);
        kfree(connection);
 }
 
@@ -190,6 +193,11 @@ gb_connection_create_range(struct greybus_host_device *hd,
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);
 
+       connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
+                                        dev_name(parent), cport_id);
+       if (!connection->wq)
+               goto err_free_connection;
+
        connection->dev.parent = parent;
        connection->dev.bus = &greybus_bus_type;
        connection->dev.type = &greybus_connection_type;
@@ -227,6 +235,8 @@ gb_connection_create_range(struct greybus_host_device *hd,
 
        return connection;
 
+err_free_connection:
+       kfree(connection);
 err_remove_ida:
        ida_simple_remove(id_map, hd_cport_id);
 
index d8fbce4029558cddbb95b8ff41fc1772d00b37dc..6b40c4aeeeacb6f6b1c3b39a98d9cdba381d7cc0 100644 (file)
@@ -39,6 +39,8 @@ struct gb_connection {
        enum gb_connection_state        state;
        struct list_head                operations;
 
+       struct workqueue_struct         *wq;
+
        atomic_t                        op_cycle;
 
        void                            *private;
index 6b87bcd6361544e0584d9c6647cb7a0bcbca5b9b..69218aa4b7f143b263ec48b30f67a1734657c188 100644 (file)
@@ -19,9 +19,6 @@
 static struct kmem_cache *gb_operation_cache;
 static struct kmem_cache *gb_message_cache;
 
-/* Workqueue to handle Greybus operation completions. */
-static struct workqueue_struct *gb_operation_workqueue;
-
 /* Wait queue for synchronous cancellations. */
 static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
 
@@ -800,7 +797,7 @@ void greybus_message_sent(struct greybus_host_device *hd,
                gb_operation_put(operation);
        } else if (status) {
                if (gb_operation_result_set(operation, status))
-                       queue_work(gb_operation_workqueue, &operation->work);
+                       queue_work(connection->wq, &operation->work);
        }
 }
 EXPORT_SYMBOL_GPL(greybus_message_sent);
@@ -837,7 +834,7 @@ static void gb_connection_recv_request(struct gb_connection *connection,
         * request handler returns.
         */
        if (gb_operation_result_set(operation, -EINPROGRESS))
-               queue_work(gb_operation_workqueue, &operation->work);
+               queue_work(connection->wq, &operation->work);
 }
 
 /*
@@ -877,7 +874,7 @@ static void gb_connection_recv_response(struct gb_connection *connection,
        /* The rest will be handled in work queue context */
        if (gb_operation_result_set(operation, errno)) {
                memcpy(message->header, data, size);
-               queue_work(gb_operation_workqueue, &operation->work);
+               queue_work(connection->wq, &operation->work);
        }
 
        gb_operation_put(operation);
@@ -931,12 +928,14 @@ void gb_connection_recv(struct gb_connection *connection,
  */
 void gb_operation_cancel(struct gb_operation *operation, int errno)
 {
+       struct gb_connection *connection = operation->connection;
+
        if (WARN_ON(gb_operation_is_incoming(operation)))
                return;
 
        if (gb_operation_result_set(operation, errno)) {
                gb_message_cancel(operation->request);
-               queue_work(gb_operation_workqueue, &operation->work);
+               queue_work(connection->wq, &operation->work);
        }
 
        atomic_inc(&operation->waiters);
@@ -1043,15 +1042,8 @@ int __init gb_operation_init(void)
        if (!gb_operation_cache)
                goto err_destroy_message_cache;
 
-       gb_operation_workqueue = alloc_workqueue("greybus_operation",
-                               WQ_UNBOUND, 1);
-       if (!gb_operation_workqueue)
-               goto err_operation;
-
        return 0;
-err_operation:
-       kmem_cache_destroy(gb_operation_cache);
-       gb_operation_cache = NULL;
+
 err_destroy_message_cache:
        kmem_cache_destroy(gb_message_cache);
        gb_message_cache = NULL;
@@ -1061,8 +1053,6 @@ err_destroy_message_cache:
 
 void gb_operation_exit(void)
 {
-       destroy_workqueue(gb_operation_workqueue);
-       gb_operation_workqueue = NULL;
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
        kmem_cache_destroy(gb_message_cache);