{
u32 tmp = readl(&ep->dev->regs->pciirqenb0);
- if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (ep->dev->quirks & PLX_LEGACY)
tmp |= BIT(ep->num);
else
tmp |= BIT(ep_bit[ep->num]);
if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
return -EDOM;
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (dev->quirks & PLX_SUPERSPEED) {
if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
return -EDOM;
ep->is_in = !!usb_endpoint_dir_in(desc);
/* sanity check ep-e/ep-f since their fifos are small */
max = usb_endpoint_maxp(desc) & 0x1fff;
- if (ep->num > 4 && max > 64 &&
- (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY))
+ if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY))
return -ERANGE;
spin_lock_irqsave(&dev->lock, flags);
}
ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
/* Enable this endpoint */
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) {
+ if (dev->quirks & PLX_LEGACY) {
tmp <<= ENDPOINT_TYPE;
tmp |= desc->bEndpointAddress;
/* default full fifo lines */
/* for OUT transfers, block the rx fifo until a read is posted */
if (!ep->is_in)
writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
- else if (dev->pdev->device != 0x2280) {
+ else if (!(dev->quirks & PLX_2280)) {
/* Added for 2282, Don't use nak packets on an in endpoint,
* this was ignored on 2280
*/
tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
- if (dev->pdev->device == 0x2280)
+ if (dev->quirks & PLX_2280)
tmp |= readl(&ep->regs->ep_irqenb);
writel(tmp, &ep->regs->ep_irqenb);
} else { /* dma, per-request */
/* init to our chosen defaults, notably so that we NAK OUT
* packets until the driver queues a read (+note erratum 0112)
*/
- if (!ep->is_in || ep->dev->pdev->device == 0x2280) {
+ if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
BIT(SET_NAK_OUT_PACKETS) |
BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
writel(tmp, &ep->regs->ep_rsp);
/* scrub most status bits, and flush any fifo state */
- if (ep->dev->pdev->device == 0x2280)
+ if (ep->dev->quirks & PLX_2280)
tmp = BIT(FIFO_OVERFLOW) |
BIT(FIFO_UNDERFLOW);
else
spin_lock_irqsave(&ep->dev->lock, flags);
nuke(ep);
- if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
+ if (ep->dev->quirks & PLX_SUPERSPEED)
ep_reset_338x(ep->dev->regs, ep);
else
ep_reset_228x(ep->dev->regs, ep);
if (ep->is_in)
dmacount |= BIT(DMA_DIRECTION);
if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
- ep->dev->pdev->device != 0x2280)
+ !(ep->dev->quirks & PLX_2280))
dmacount |= BIT(END_OF_CHAIN);
req->valid = valid;
struct net2280_dma_regs __iomem *dma = ep->dma;
unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
- if (ep->dev->pdev->device != 0x2280)
+ if (!(ep->dev->quirks & PLX_2280))
tmp |= BIT(END_OF_CHAIN);
writel(tmp, &dma->dmacount);
writel(readl(&dma->dmastat), &dma->dmastat);
writel(td_dma, &dma->dmadesc);
- if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
+ if (ep->dev->quirks & PLX_SUPERSPEED)
dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
writel(dmactl, &dma->dmactl);
/* DMA request while EP halted */
if (ep->dma &&
(readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) &&
- (dev->pdev->vendor == PCI_VENDOR_ID_PLX)) {
+ (dev->quirks & PLX_SUPERSPEED)) {
int valid = 1;
if (ep->is_in) {
int expect;
} else if (!ep->is_in &&
(req->req.length % ep->ep.maxpacket) != 0) {
tmp = readl(&ep->regs->ep_stat);
- if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
+ if (ep->dev->quirks & PLX_SUPERSPEED)
return dma_done(ep, req, tmp, 0);
/* AVOID TROUBLE HERE by not issuing short reads from
static void abort_dma(struct net2280_ep *ep)
{
- if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (ep->dev->quirks & PLX_LEGACY)
return abort_dma_228x(ep);
return abort_dma_338x(ep);
}
ep->wedged = 1;
} else {
clear_halt(ep);
- if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX &&
+ if ((ep->dev->quirks & PLX_SUPERSPEED) &&
!list_empty(&ep->queue) && ep->td_dma)
restart_dma(ep);
ep->wedged = 0;
static void usb_reset(struct net2280 *dev)
{
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (dev->quirks & PLX_LEGACY)
return usb_reset_228x(dev);
return usb_reset_338x(dev);
}
static void usb_reinit(struct net2280 *dev)
{
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (dev->quirks & PLX_LEGACY)
return usb_reinit_228x(dev);
return usb_reinit_338x(dev);
}
static void ep0_start(struct net2280 *dev)
{
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (dev->quirks & PLX_LEGACY)
return ep0_start_228x(dev);
return ep0_start_338x(dev);
}
goto err_func;
/* Enable force-full-speed testing mode, if desired */
- if (full_speed && dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (full_speed && (dev->quirks & PLX_LEGACY))
writel(BIT(FORCE_FULL_SPEED_MODE), &dev->usb->xcvrdiag);
/* ... then enable host detection and ep0; and we're ready
*/
net2280_led_active(dev, 1);
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
+ if (dev->quirks & PLX_SUPERSPEED)
defect7374_enable_data_eps_zero(dev);
ep0_start(dev);
net2280_led_active(dev, 0);
/* Disable full-speed test mode */
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (dev->quirks & PLX_LEGACY)
writel(0, &dev->usb->xcvrdiag);
device_remove_file(&dev->pdev->dev, &dev_attr_function);
ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
ep->ep.name, t, req ? &req->req : 0);
#endif
- if (!ep->is_in || ep->dev->pdev->device == 0x2280)
+ if (!ep->is_in || (ep->dev->quirks & PLX_2280))
writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
else
/* Added for 2282 */
}
ep->stopped = 0;
dev->protocol_stall = 0;
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
+ if (dev->quirks & PLX_SUPERSPEED)
ep->is_halt = 0;
else{
- if (ep->dev->pdev->device == 0x2280)
+ if (ep->dev->quirks & PLX_2280)
tmp = BIT(FIFO_OVERFLOW) |
BIT(FIFO_UNDERFLOW);
else
cpu_to_le32s(&u.raw[0]);
cpu_to_le32s(&u.raw[1]);
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
+ if (dev->quirks & PLX_SUPERSPEED)
defect7374_workaround(dev, u.r);
tmp = 0;
} else {
ep_vdbg(dev, "%s clear halt\n", e->ep.name);
clear_halt(e);
- if (ep->dev->pdev->vendor ==
- PCI_VENDOR_ID_PLX &&
+ if ((ep->dev->quirks & PLX_SUPERSPEED) &&
!list_empty(&e->queue) && e->td_dma)
restart_dma(e);
}
if (e->ep.name == ep0name)
goto do_stall;
set_halt(e);
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX && e->dma)
+ if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
abort_dma(e);
allow_status(ep);
ep_vdbg(dev, "%s set halt\n", ep->ep.name);
writel(stat, &dev->regs->irqstat1);
/* some status we can just ignore */
- if (dev->pdev->device == 0x2280)
+ if (dev->quirks & PLX_2280)
stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
BIT(SUSPEND_REQUEST_INTERRUPT) |
BIT(RESUME_INTERRUPT) |
writel(tmp, &dma->dmastat);
/* dma sync*/
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (dev->quirks & PLX_SUPERSPEED) {
u32 r_dmacount = readl(&dma->dmacount);
if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
struct net2280 *dev = _dev;
/* shared interrupt, not ours */
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY &&
+ if ((dev->quirks & PLX_LEGACY) &&
(!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
return IRQ_NONE;
/* control requests and PIO */
handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (dev->quirks & PLX_SUPERSPEED) {
/* re-enable interrupt to trigger any possible new interrupt */
u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
}
if (dev->got_irq)
free_irq(pdev->irq, dev);
- if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
+ if (use_msi && (dev->quirks & PLX_SUPERSPEED))
pci_disable_msi(pdev);
if (dev->regs)
iounmap(dev->regs);
pci_set_drvdata(pdev, dev);
spin_lock_init(&dev->lock);
+ dev->quirks = id->driver_data;
dev->pdev = pdev;
dev->gadget.ops = &net2280_ops;
- dev->gadget.max_speed = (dev->pdev->vendor == PCI_VENDOR_ID_PLX) ?
+ dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
USB_SPEED_SUPER : USB_SPEED_HIGH;
/* the "gadget" abstracts/virtualizes the controller */
dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (dev->quirks & PLX_SUPERSPEED) {
u32 fsmvalue;
u32 usbstat;
dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
goto done;
}
- if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
+ if (use_msi && (dev->quirks & PLX_SUPERSPEED))
if (pci_enable_msi(pdev))
ep_err(dev, "Failed to enable MSI mode\n");
}
/* enable lower-overhead pci memory bursts during DMA */
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (dev->quirks & PLX_LEGACY)
writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
/*
* 256 write retries may not be enough...
writel(0, &dev->usb->usbctl);
/* Disable full-speed test mode */
- if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
+ if (dev->quirks & PLX_LEGACY)
writel(0, &dev->usb->xcvrdiag);
}
.device = 0x2280,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
+ .driver_data = PLX_LEGACY | PLX_2280,
}, {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
.device = 0x2282,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
+ .driver_data = PLX_LEGACY,
},
{
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.device = 0x3380,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
+ .driver_data = PLX_SUPERSPEED,
},
{
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.device = 0x3382,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
+ .driver_data = PLX_SUPERSPEED,
},
{ /* end: all zeroes */ }
};