*dma_handle = 0;
}
-void unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
+void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
		dma_unmap_single(hcd->self.controller, urb->setup_dma,
				sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE);
	/* ... the URB_SETUP_MAP_LOCAL (local-memory HCD) case is handled
	 * likewise ... */
	/* Make it safe to call this routine more than once */
urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL);
}
-EXPORT_SYMBOL_GPL(unmap_urb_setup_for_dma);
+EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_setup_for_dma);
-void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
enum dma_data_direction dir;
- unmap_urb_setup_for_dma(hcd, urb);
+ usb_hcd_unmap_urb_setup_for_dma(hcd, urb);
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	if (urb->transfer_flags & URB_DMA_MAP_SG)
		dma_unmap_sg(hcd->self.controller, urb->sg, urb->num_sgs, dir);
	/* ... URB_DMA_MAP_PAGE, URB_DMA_MAP_SINGLE and URB_MAP_LOCAL are
	 * unmapped likewise ... */
	urb->transfer_flags &= ~(URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
			URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
}
-EXPORT_SYMBOL_GPL(unmap_urb_for_dma);
+EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_for_dma);
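
With the usb_hcd_ prefix, the unmap helpers read as part of the HCD driver API proper: a host controller driver that decides to bypass DMA for a transfer must first hand the buffer back to the CPU. A minimal sketch of that usage, assuming a hypothetical driver routine my_hcd_pio_out() and FIFO helper my_hcd_write_fifo() (neither is part of this patch):

	/* Hypothetical PIO fall-back in a host controller driver. */
	static void my_hcd_pio_out(struct usb_hcd *hcd, struct urb *urb)
	{
		/* Undo the mapping usbcore set up in usb_hcd_submit_urb()
		 * so the CPU may access urb->transfer_buffer coherently. */
		usb_hcd_unmap_urb_for_dma(hcd, urb);

		my_hcd_write_fifo(hcd, urb->transfer_buffer,
				urb->transfer_buffer_length);
	}

The remaining usbcore hunks, in map_urb_for_dma(), usb_hcd_submit_urb() and usb_hcd_giveback_urb(), simply pick up the new names:
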
static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
		gfp_t mem_flags)
{
	...
	}
if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
URB_SETUP_MAP_LOCAL)))
- unmap_urb_for_dma(hcd, urb);
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
}
return ret;
}
if (likely(status == 0)) {
status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
if (unlikely(status))
- unmap_urb_for_dma(hcd, urb);
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
}
}
	else if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
			urb->actual_length < urb->transfer_buffer_length &&
			!status))
		status = -EREMOTEIO;

-	unmap_urb_for_dma(hcd, urb);
+	usb_hcd_unmap_urb_for_dma(hcd, urb);
usbmon_urb_complete(&hcd->self, urb, status);
usb_unanchor_urb(urb);
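
As before, usb_hcd_giveback_urb() unmaps the URB before usbmon and the completion callback see it, so class drivers may dereference urb->transfer_buffer directly in their completion handlers. For illustration only (example_complete() is a made-up name):

	#include <linux/usb.h>

	static void example_complete(struct urb *urb)
	{
		u8 *buf = urb->transfer_buffer;

		/* Safe: the HCD core unmapped the buffer before calling us. */
		if (!urb->status && urb->actual_length)
			pr_info("first byte: %02x\n", buf[0]);
	}

The hunks below update the call sites in the i.MX21 host controller driver (imx21-hcd), which falls back to PIO when a mapping is unsuitable for its DMA engine:
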
if (state == US_CTRL_SETUP) {
dir = TD_DIR_SETUP;
if (unsuitable_for_dma(urb->setup_dma))
- unmap_urb_setup_for_dma(imx21->hcd, urb);
+ usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
+ urb);
etd->dma_handle = urb->setup_dma;
etd->cpu_buffer = urb->setup_packet;
		bufround = 0;
		/* ... */
	} else {
		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
bufround = (dir == TD_DIR_IN) ? 1 : 0;
if (unsuitable_for_dma(urb->transfer_dma))
- unmap_urb_for_dma(imx21->hcd, urb);
+ usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
etd->dma_handle = urb->transfer_dma;
etd->cpu_buffer = urb->transfer_buffer;
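
unsuitable_for_dma() rejects mappings the i.MX21 DMA hardware cannot use; the driver then unmaps them and falls back to copying through the CPU pointers it keeps in etd->cpu_buffer. The MUSB host driver below does the same whenever it moves a packet through its FIFOs by PIO, on both the TX and RX paths:
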
if (length > qh->maxpacket)
length = qh->maxpacket;
/* Unmap the buffer so that CPU can use it */
- unmap_urb_for_dma(musb_to_hcd(musb), urb);
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
qh->segsize = length;
if (!dma) {
/* Unmap the buffer so that CPU can use it */
- unmap_urb_for_dma(musb_to_hcd(musb), urb);
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
done = musb_host_packet_rx(musb, urb,
epnum, iso_err);
DBG(6, "read %spacket\n", done ? "last " : "");
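
Finally, the prototypes in the shared HCD header (include/linux/usb/hcd.h) are renamed to match, so host controller drivers built outside usbcore, such as imx21-hcd and musb, keep building against the prefixed symbols:
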
extern int usb_hcd_unlink_urb(struct urb *urb, int status);
extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb,
int status);
-extern void unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *);
-extern void unmap_urb_for_dma(struct usb_hcd *, struct urb *);
+extern void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *);
+extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *);
extern void usb_hcd_flush_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep);
extern void usb_hcd_disable_endpoint(struct usb_device *udev,