{
u32 tmp = readl(&ep->dev->regs->pciirqenb0);
- if (ep->dev->pdev->vendor == 0x17cc)
+ if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
tmp |= 1 << ep->num;
else
tmp |= 1 << ep_bit[ep->num];
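/*
 * Note on the new symbols used throughout this series: PCI_VENDOR_ID_PLX
 * (0x10b5) already exists in <linux/pci_ids.h>.  PCI_VENDOR_ID_PLX_LEGACY
 * replaces the raw 0x17cc (NetChip) id used by the 228x parts; a minimal
 * sketch of the definition this patch is assumed to add (e.g. in the
 * driver header) would be:
 */
#define PCI_VENDOR_ID_PLX_LEGACY	0x17cc	/* assumed local definition */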
if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
return -EDOM;
- if (dev->pdev->vendor == 0x10b5) {
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
return -EDOM;
ep->is_in = !!usb_endpoint_dir_in(desc);
/* sanity check ep-e/ep-f since their fifos are small */
max = usb_endpoint_maxp (desc) & 0x1fff;
- if (ep->num > 4 && max > 64 && (dev->pdev->vendor == 0x17cc))
+ if (ep->num > 4 && max > 64 &&
+ (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY))
return -ERANGE;
spin_lock_irqsave (&dev->lock, flags);
}
ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
/* Enable this endpoint */
- if (dev->pdev->vendor == 0x17cc) {
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) {
tmp <<= ENDPOINT_TYPE;
tmp |= desc->bEndpointAddress;
/* default full fifo lines */
spin_lock_irqsave (&ep->dev->lock, flags);
nuke (ep);
- if (ep->dev->pdev->vendor == 0x10b5)
+ if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
ep_reset_338x(ep->dev->regs, ep);
else
ep_reset_228x(ep->dev->regs, ep);
writel (readl (&dma->dmastat), &dma->dmastat);
writel (td_dma, &dma->dmadesc);
- if (ep->dev->pdev->vendor == 0x10b5)
+ if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
dmactl |= (0x01 << DMA_REQUEST_OUTSTANDING);
writel (dmactl, &dma->dmactl);
/* DMA request while EP halted */
if (ep->dma &&
(readl(&ep->regs->ep_rsp) & (1 << CLEAR_ENDPOINT_HALT)) &&
- (dev->pdev->vendor == 0x10b5)) {
+ (dev->pdev->vendor == PCI_VENDOR_ID_PLX)) {
int valid = 1;
if (ep->is_in) {
int expect;
} else if (!ep->is_in
&& (req->req.length % ep->ep.maxpacket) != 0) {
tmp = readl (&ep->regs->ep_stat);
- if (ep->dev->pdev->vendor == 0x10b5)
+ if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
return dma_done(ep, req, tmp, 0);
/* AVOID TROUBLE HERE by not issuing short reads from
static void abort_dma(struct net2280_ep *ep)
{
- if (ep->dev->pdev->vendor == 0x17cc)
+ if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
return abort_dma_228x(ep);
return abort_dma_338x(ep);
}
ep->wedged = 1;
} else {
clear_halt (ep);
- if (ep->dev->pdev->vendor == 0x10b5 &&
+ if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX &&
!list_empty(&ep->queue) && ep->td_dma)
restart_dma(ep);
ep->wedged = 0;
static void usb_reset(struct net2280 *dev)
{
- if (dev->pdev->vendor == 0x17cc)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
return usb_reset_228x(dev);
return usb_reset_338x(dev);
}
static void usb_reinit(struct net2280 *dev)
{
- if (dev->pdev->vendor == 0x17cc)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
return usb_reinit_228x(dev);
return usb_reinit_338x(dev);
}
static void ep0_start(struct net2280 *dev)
{
- if (dev->pdev->vendor == 0x17cc)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
return ep0_start_228x(dev);
return ep0_start_338x(dev);
}
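/*
 * abort_dma(), usb_reset(), usb_reinit() and ep0_start() above all share
 * the same dispatch: the legacy (NetChip 228x) vendor id selects the
 * *_228x() helpers, while the PLX id selects the 338x variants with their
 * different register layout.  The vendor id is the only discriminator.
 */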
if (retval) goto err_func;
/* Enable force-full-speed testing mode, if desired */
- if (full_speed && dev->pdev->vendor == 0x17cc)
+ if (full_speed && dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
writel(1 << FORCE_FULL_SPEED_MODE, &dev->usb->xcvrdiag);
/* ... then enable host detection and ep0; and we're ready
*/
net2280_led_active (dev, 1);
- if (dev->pdev->vendor == 0x10b5)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
defect7374_enable_data_eps_zero(dev);
ep0_start (dev);
net2280_led_active (dev, 0);
/* Disable full-speed test mode */
- if (dev->pdev->vendor == 0x17cc)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
writel(0, &dev->usb->xcvrdiag);
device_remove_file (&dev->pdev->dev, &dev_attr_function);
}
ep->stopped = 0;
dev->protocol_stall = 0;
- if (dev->pdev->vendor == 0x10b5)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
ep->is_halt = 0;
else {
if (ep->dev->pdev->device == 0x2280)
cpu_to_le32s (&u.raw [0]);
cpu_to_le32s (&u.raw [1]);
- if (dev->pdev->vendor == 0x10b5)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
defect7374_workaround(dev, u.r);
tmp = 0;
} else {
VDEBUG(dev, "%s clear halt\n", e->ep.name);
clear_halt(e);
- if (ep->dev->pdev->vendor == 0x10b5 &&
+ if (ep->dev->pdev->vendor ==
+ PCI_VENDOR_ID_PLX &&
!list_empty(&e->queue) && e->td_dma)
restart_dma(e);
}
if (e->ep.name == ep0name)
goto do_stall;
set_halt (e);
- if (dev->pdev->vendor == 0x10b5 && e->dma)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX && e->dma)
abort_dma(e);
allow_status (ep);
VDEBUG (dev, "%s set halt\n", ep->ep.name);
writel (tmp, &dma->dmastat);
/* dma sync*/
- if (dev->pdev->vendor == 0x10b5) {
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
u32 r_dmacount = readl(&dma->dmacount);
if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
(tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT)))
struct net2280 *dev = _dev;
/* shared interrupt, not ours */
- if (dev->pdev->vendor == 0x17cc &&
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY &&
(!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED))))
return IRQ_NONE;
/* control requests and PIO */
handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
- if (dev->pdev->vendor == 0x10b5) {
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
/* re-enable interrupt to trigger any possible new interrupt */
u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
}
if (dev->got_irq)
free_irq (pdev->irq, dev);
- if (use_msi && dev->pdev->vendor == 0x10b5)
+ if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
pci_disable_msi(pdev);
if (dev->regs)
iounmap (dev->regs);
spin_lock_init (&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &net2280_ops;
- dev->gadget.max_speed = (dev->pdev->vendor == 0x10b5) ?
+ dev->gadget.max_speed = (dev->pdev->vendor == PCI_VENDOR_ID_PLX) ?
USB_SPEED_SUPER : USB_SPEED_HIGH;
/* the "gadget" abstracts/virtualizes the controller */
dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
- if (dev->pdev->vendor == 0x10b5) {
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
u32 fsmvalue;
u32 usbstat;
dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
goto done;
}
- if (use_msi && dev->pdev->vendor == 0x10b5)
+ if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
if (pci_enable_msi(pdev))
ERROR(dev, "Failed to enable MSI mode\n");
}
/* enable lower-overhead pci memory bursts during DMA */
- if (dev->pdev->vendor == 0x17cc)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
writel((1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
// 256 write retries may not be enough...
// | (1 << PCI_RETRY_ABORT_ENABLE)
writel (0, &dev->usb->usbctl);
/* Disable full-speed test mode */
- if (dev->pdev->vendor == 0x17cc)
+ if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
writel(0, &dev->usb->xcvrdiag);
}
static const struct pci_device_id pci_ids [] = { {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
- .vendor = 0x17cc,
+ .vendor = PCI_VENDOR_ID_PLX_LEGACY,
.device = 0x2280,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
}, {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
- .vendor = 0x17cc,
+ .vendor = PCI_VENDOR_ID_PLX_LEGACY,
.device = 0x2282,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
{
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
- .vendor = 0x10b5,
+ .vendor = PCI_VENDOR_ID_PLX,
.device = 0x3380,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
{
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
- .vendor = 0x10b5,
+ .vendor = PCI_VENDOR_ID_PLX,
.device = 0x3382,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,