* @cport_out_urb: array of urbs for the CPort out messages
* @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
* not.
+ * @cport_out_urb_cancelled: array of flags indicating whether the
+ * corresponding @cport_out_urb is being cancelled
* @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
*/
struct es1_ap_dev {
u8 *cport_in_buffer[NUM_CPORT_IN_URB];
struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
+ bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
spinlock_t cport_out_urb_lock;
};
/* Look in our pool of allocated urbs first, as that's the "fastest" */
for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
- if (es1->cport_out_urb_busy[i] == false) {
+ if (es1->cport_out_urb_busy[i] == false &&
+ es1->cport_out_urb_cancelled[i] == false) {
es1->cport_out_urb_busy[i] = true;
urb = es1->cport_out_urb[i];
break;
}
/*
- * Returns an opaque cookie value if successful, or a pointer coded
- * error otherwise. If the caller wishes to cancel the in-flight
- * buffer, it must supply the returned cookie to the cancel routine.
+ * Returns zero if the message was successfully queued, or a negative errno
+ * otherwise.
*/
-static void *message_send(struct greybus_host_device *hd, u16 cport_id,
+static int message_send(struct greybus_host_device *hd, u16 cport_id,
struct gb_message *message, gfp_t gfp_mask)
{
struct es1_ap_dev *es1 = hd_to_es1(hd);
size_t buffer_size;
int retval;
struct urb *urb;
+ unsigned long flags;
/*
* The data actually transferred will include an indication
*/
if (!cport_id_valid(cport_id)) {
pr_err("invalid destination cport 0x%02x\n", cport_id);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/* Find a free urb */
urb = next_free_urb(es1, gfp_mask);
if (!urb)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
+
+ spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
+ message->hcpriv = urb;
+ spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);
/* Pack the cport id into the message header */
gb_message_cport_pack(message->header, cport_id);
retval = usb_submit_urb(urb, gfp_mask);
if (retval) {
pr_err("error %d submitting URB\n", retval);
+
+ spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
+ message->hcpriv = NULL;
+ spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);
+
free_urb(es1, urb);
gb_message_cport_clear(message->header);
- return ERR_PTR(retval);
+
+ return retval;
}
- return urb;
+ return 0;
}
/*
- * The cookie value supplied is the value that message_send()
- * returned to its caller. It identifies the message that should be
- * canceled. This function must also handle (which is to say,
- * ignore) a null cookie value.
+ * Must not be called from atomic context: usb_kill_urb() below can sleep.
*/
-static void message_cancel(void *cookie)
+static void message_cancel(struct gb_message *message)
{
+ struct greybus_host_device *hd = message->operation->connection->hd;
+ struct es1_ap_dev *es1 = hd_to_es1(hd);
+ struct urb *urb;
+ int i;
- /*
- * We really should be defensive and track all outstanding
- * (sent) messages rather than trusting the cookie provided
- * is valid. For the time being, this will do.
- */
- if (cookie)
- usb_kill_urb(cookie);
+ might_sleep();
+
+ spin_lock_irq(&es1->cport_out_urb_lock);
+ urb = message->hcpriv;
+
+ /* Prevent dynamically allocated urb from being deallocated. */
+ usb_get_urb(urb);
+
+ /* Prevent pre-allocated urb from being reused. */
+ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+ if (urb == es1->cport_out_urb[i]) {
+ es1->cport_out_urb_cancelled[i] = true;
+ break;
+ }
+ }
+ spin_unlock_irq(&es1->cport_out_urb_lock);
+
+ usb_kill_urb(urb);
+
+ if (i < NUM_CPORT_OUT_URB) {
+ spin_lock_irq(&es1->cport_out_urb_lock);
+ es1->cport_out_urb_cancelled[i] = false;
+ spin_unlock_irq(&es1->cport_out_urb_lock);
+ }
+
+ usb_free_urb(urb);
}
static struct greybus_host_driver es1_driver = {
struct greybus_host_device *hd = message->operation->connection->hd;
struct es1_ap_dev *es1 = hd_to_es1(hd);
int status = check_urb_status(urb);
+ unsigned long flags;
gb_message_cport_clear(message->header);
*/
greybus_message_sent(hd, message, status);
+ spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
+ message->hcpriv = NULL;
+ spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);
+
free_urb(es1, urb);
}
* @cport_out_urb: array of urbs for the CPort out messages
* @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
* not.
+ * @cport_out_urb_cancelled: array of flags indicating whether the
+ * corresponding @cport_out_urb is being cancelled
* @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
*/
struct es1_ap_dev {
struct es1_cport_out cport_out[NUM_BULKS];
struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
+ bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
spinlock_t cport_out_urb_lock;
int cport_to_ep[CPORT_MAX];
/* Look in our pool of allocated urbs first, as that's the "fastest" */
for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
- if (es1->cport_out_urb_busy[i] == false) {
+ if (es1->cport_out_urb_busy[i] == false &&
+ es1->cport_out_urb_cancelled[i] == false) {
es1->cport_out_urb_busy[i] = true;
urb = es1->cport_out_urb[i];
break;
}
/*
- * Returns an opaque cookie value if successful, or a pointer coded
- * error otherwise. If the caller wishes to cancel the in-flight
- * buffer, it must supply the returned cookie to the cancel routine.
+ * Returns zero if the message was successfully queued, or a negative errno
+ * otherwise.
*/
-static void *message_send(struct greybus_host_device *hd, u16 cport_id,
+static int message_send(struct greybus_host_device *hd, u16 cport_id,
struct gb_message *message, gfp_t gfp_mask)
{
struct es1_ap_dev *es1 = hd_to_es1(hd);
int retval;
struct urb *urb;
int bulk_ep_set;
+ unsigned long flags;
/*
* The data actually transferred will include an indication
*/
if (!cport_id_valid(cport_id)) {
pr_err("invalid destination cport 0x%02x\n", cport_id);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/* Find a free urb */
urb = next_free_urb(es1, gfp_mask);
if (!urb)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
+
+ spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
+ message->hcpriv = urb;
+ spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);
/* Pack the cport id into the message header */
gb_message_cport_pack(message->header, cport_id);
retval = usb_submit_urb(urb, gfp_mask);
if (retval) {
pr_err("error %d submitting URB\n", retval);
+
+ spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
+ message->hcpriv = NULL;
+ spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);
+
free_urb(es1, urb);
gb_message_cport_clear(message->header);
- return ERR_PTR(retval);
+
+ return retval;
}
- return urb;
+ return 0;
}
/*
- * The cookie value supplied is the value that message_send()
- * returned to its caller. It identifies the message that should be
- * canceled. This function must also handle (which is to say,
- * ignore) a null cookie value.
+ * Must not be called from atomic context: usb_kill_urb() below can sleep.
*/
-static void message_cancel(void *cookie)
+static void message_cancel(struct gb_message *message)
{
+ struct greybus_host_device *hd = message->operation->connection->hd;
+ struct es1_ap_dev *es1 = hd_to_es1(hd);
+ struct urb *urb;
+ int i;
- /*
- * We really should be defensive and track all outstanding
- * (sent) messages rather than trusting the cookie provided
- * is valid. For the time being, this will do.
- */
- if (cookie)
- usb_kill_urb(cookie);
+ might_sleep();
+
+ spin_lock_irq(&es1->cport_out_urb_lock);
+ urb = message->hcpriv;
+
+ /* Prevent dynamically allocated urb from being deallocated. */
+ usb_get_urb(urb);
+
+ /* Prevent pre-allocated urb from being reused. */
+ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
+ if (urb == es1->cport_out_urb[i]) {
+ es1->cport_out_urb_cancelled[i] = true;
+ break;
+ }
+ }
+ spin_unlock_irq(&es1->cport_out_urb_lock);
+
+ usb_kill_urb(urb);
+
+ if (i < NUM_CPORT_OUT_URB) {
+ spin_lock_irq(&es1->cport_out_urb_lock);
+ es1->cport_out_urb_cancelled[i] = false;
+ spin_unlock_irq(&es1->cport_out_urb_lock);
+ }
+
+ usb_free_urb(urb);
}
static struct greybus_host_driver es1_driver = {
struct greybus_host_device *hd = message->operation->connection->hd;
struct es1_ap_dev *es1 = hd_to_es1(hd);
int status = check_urb_status(urb);
+ unsigned long flags;
gb_message_cport_clear(message->header);
*/
greybus_message_sent(hd, message, status);
+ spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
+ message->hcpriv = NULL;
+ spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);
+
free_urb(es1, urb);
}
struct greybus_host_driver {
size_t hd_priv_size;
- void *(*message_send)(struct greybus_host_device *hd, u16 dest_cport_id,
+ int (*message_send)(struct greybus_host_device *hd, u16 dest_cport_id,
struct gb_message *message, gfp_t gfp_mask);
- void (*message_cancel)(void *cookie);
+ void (*message_cancel)(struct gb_message *message);
int (*submit_svc)(struct svc_msg *svc_msg,
struct greybus_host_device *hd);
};
/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_workqueue;
-/* Protects the cookie representing whether a message is in flight */
-static DEFINE_MUTEX(gb_message_mutex);
-
/*
* Protects access to connection operations lists, as well as
* updates to operation->errno.
static int gb_message_send(struct gb_message *message)
{
struct gb_connection *connection = message->operation->connection;
- int ret = 0;
- void *cookie;
- mutex_lock(&gb_message_mutex);
- cookie = connection->hd->driver->message_send(connection->hd,
+ return connection->hd->driver->message_send(connection->hd,
connection->hd_cport_id,
message,
GFP_KERNEL);
- if (IS_ERR(cookie))
- ret = PTR_ERR(cookie);
- else
- message->cookie = cookie;
- mutex_unlock(&gb_message_mutex);
-
- return ret;
}
/*
*/
static void gb_message_cancel(struct gb_message *message)
{
- mutex_lock(&gb_message_mutex);
- if (message->cookie) {
- struct greybus_host_device *hd;
+ struct greybus_host_device *hd = message->operation->connection->hd;
- hd = message->operation->connection->hd;
- hd->driver->message_cancel(message->cookie);
- }
- mutex_unlock(&gb_message_mutex);
+ hd->driver->message_cancel(message);
}
static void gb_operation_request_handle(struct gb_operation *operation)
{
struct gb_operation *operation;
- /* Get the message and record that it is no longer in flight */
- message->cookie = NULL;
-
/*
* If the message was a response, we just need to drop our
* reference to the operation. If an error occurred, report
#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
/*
- * Protocol code should only examine the payload and payload_size
- * fields. All other fields are intended to be private to the
- * operations core code.
+ * Protocol code should only examine the payload and payload_size fields, and
+ * host-controller drivers may use the hcpriv field. All other fields are
+ * intended to be private to the operations core code.
*/
struct gb_message {
struct gb_operation *operation;
- void *cookie;
struct gb_operation_msg_hdr *header;
void *payload;
size_t payload_size;
void *buffer;
+
+ void *hcpriv;
};
/*