 #define to_urb(d) container_of(d, struct urb, kref)
 
+static DEFINE_SPINLOCK(usb_reject_lock);
+
 static void urb_destroy(struct kref *kref)
 {
         struct urb *urb = to_urb(kref);
@@ ... @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
         usb_get_urb(urb);
         list_add_tail(&urb->anchor_list, &anchor->urb_list);
         urb->anchor = anchor;
+
+        if (unlikely(anchor->poisoned)) {
+                spin_lock(&usb_reject_lock);
+                urb->reject++;
+                spin_unlock(&usb_reject_lock);
+        }
+
         spin_unlock_irqrestore(&anchor->lock, flags);
 }
 EXPORT_SYMBOL_GPL(usb_anchor_urb);
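
Two things are worth noting in the hunk above. The reject bump runs while
anchor->lock is held with interrupts disabled, so only a spinning lock can
protect it; that is why the patch turns usb_reject_mutex into the spinlock
usb_reject_lock defined at the top of the file. For a driver, the visible
effect is that a URB anchored to a poisoned anchor can no longer be started:
its raised reject count makes submission fail with -EPERM. A minimal sketch
of the driver-side pattern (struct my_dev and fill_and_anchor() are invented
names, not part of this patch):

#include <linux/usb.h>

struct my_dev {                         /* invented example driver state */
        struct usb_anchor submitted;    /* in-flight URBs are anchored here */
};

static int fill_and_anchor(struct my_dev *dev, struct urb *urb)
{
        int rv;

        usb_anchor_urb(urb, &dev->submitted);  /* raises urb->reject if poisoned */
        rv = usb_submit_urb(urb, GFP_KERNEL);  /* then fails with -EPERM */
        if (rv)
                usb_unanchor_urb(urb);
        return rv;
}
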
@@ ... @@
 }
 EXPORT_SYMBOL_GPL(usb_unlink_urb);
 
-static DEFINE_MUTEX(usb_reject_mutex);
 /**
  * usb_kill_urb - cancel a transfer request and wait for it to finish
  * @urb: pointer to URB describing a previously submitted request,
  *      may be NULL
@@ ... @@ void usb_kill_urb(struct urb *urb)
         might_sleep();
         if (!(urb && urb->dev && urb->ep))
                 return;
-        mutex_lock(&usb_reject_mutex);
+        spin_lock_irq(&usb_reject_lock);
         ++urb->reject;
-        mutex_unlock(&usb_reject_mutex);
+        spin_unlock_irq(&usb_reject_lock);
         usb_hcd_unlink_urb(urb, -ENOENT);
         wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
-        mutex_lock(&usb_reject_mutex);
+        spin_lock_irq(&usb_reject_lock);
         --urb->reject;
-        mutex_unlock(&usb_reject_mutex);
+        spin_unlock_irq(&usb_reject_lock);
 }
 EXPORT_SYMBOL_GPL(usb_kill_urb);
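
usb_kill_urb() raises reject only for the duration of the wait; a counter
rather than a flag keeps nested kill/poison calls balanced. What the raised
count guards against is a completion handler requeueing the URB while the
kill is in progress: with reject nonzero the resubmission fails instead of
restarting I/O. A sketch of such a racing handler (my_complete() is an
invented name, not from this patch):

static void my_complete(struct urb *urb)
{
        if (urb->status)
                return;         /* unlinked or errored: do not requeue */
        /* while usb_kill_urb() holds urb->reject, this fails with -EPERM */
        usb_submit_urb(urb, GFP_ATOMIC);
}
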
@@ ... @@ void usb_poison_urb(struct urb *urb)
         might_sleep();
         if (!(urb && urb->dev && urb->ep))
                 return;
-        mutex_lock(&usb_reject_mutex);
+        spin_lock_irq(&usb_reject_lock);
         ++urb->reject;
-        mutex_unlock(&usb_reject_mutex);
+        spin_unlock_irq(&usb_reject_lock);
         usb_hcd_unlink_urb(urb, -ENOENT);
         wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
 }
 EXPORT_SYMBOL_GPL(usb_poison_urb);
@@ ... @@
 void usb_unpoison_urb(struct urb *urb)
 {
+        unsigned long flags;
+
         if (!urb)
                 return;
-        mutex_lock(&usb_reject_mutex);
+        spin_lock_irqsave(&usb_reject_lock, flags);
         --urb->reject;
-        mutex_unlock(&usb_reject_mutex);
+        spin_unlock_irqrestore(&usb_reject_lock, flags);
 }
 EXPORT_SYMBOL_GPL(usb_unpoison_urb);
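
usb_poison_urb() is usb_kill_urb() without the final decrement: the URB stays
rejected until a matching usb_unpoison_urb(). The unpoison side takes the
irqsave variant since it never sleeps and may plausibly be called with
interrupts already disabled. One plausible pairing, sketched with invented
names (struct my_dev, int_urb, my_pre_reset(), my_post_reset()), is across a
device reset:

#include <linux/usb.h>

struct my_dev {                 /* invented example driver state */
        struct urb *int_urb;    /* interrupt URB kept across resets */
};

static int my_pre_reset(struct usb_interface *intf)
{
        struct my_dev *dev = usb_get_intfdata(intf);

        usb_poison_urb(dev->int_urb);   /* wait for it, then block resubmits */
        return 0;
}

static int my_post_reset(struct usb_interface *intf)
{
        struct my_dev *dev = usb_get_intfdata(intf);

        usb_unpoison_urb(dev->int_urb); /* drop the poison count again */
        return usb_submit_urb(dev->int_urb, GFP_NOIO);
}
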
@@ ... @@
 }
 EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
+
+/**
+ * usb_poison_anchored_urbs - cease all traffic from an anchor
+ * @anchor: anchor the requests are bound to
+ *
+ * This routine poisons all outstanding URBs, starting from the
+ * back of the queue.  Newly anchored URBs will be poisoned as
+ * they are added.
+ */
+void usb_poison_anchored_urbs(struct usb_anchor *anchor)
+{
+        struct urb *victim;
+
+        spin_lock_irq(&anchor->lock);
+        anchor->poisoned = 1;
+        while (!list_empty(&anchor->urb_list)) {
+                victim = list_entry(anchor->urb_list.prev, struct urb,
+                                    anchor_list);
+                /* we must make sure the URB isn't freed before we kill it */
+                usb_get_urb(victim);
+                spin_unlock_irq(&anchor->lock);
+                /* this will unanchor the URB */
+                usb_poison_urb(victim);
+                usb_put_urb(victim);
+                spin_lock_irq(&anchor->lock);
+        }
+        spin_unlock_irq(&anchor->lock);
+}
+EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
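
Together with the poisoned check added to usb_anchor_urb() above, this gives
a disconnect path one call that both drains in-flight URBs and rejects any
stragglers anchored afterwards. A sketch, assuming the driver anchors its
URBs on dev->submitted (my_disconnect() and struct my_dev are invented):

#include <linux/slab.h>
#include <linux/usb.h>

struct my_dev {                         /* invented example driver state */
        struct usb_anchor submitted;    /* all in-flight URBs anchor here */
};

static void my_disconnect(struct usb_interface *intf)
{
        struct my_dev *dev = usb_get_intfdata(intf);

        usb_set_intfdata(intf, NULL);
        /* poisons each anchored URB in turn and flags the anchor itself */
        usb_poison_anchored_urbs(&dev->submitted);
        kfree(dev);
}
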
 /**
  * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
  * @anchor: anchor the requests are bound to