source "drivers/iio/Kconfig"
+source "drivers/vme/Kconfig"
+
endmenu
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_IIO) += iio/
+obj-$(CONFIG_VME_BUS) += vme/
source "drivers/staging/vt6656/Kconfig"
-source "drivers/staging/vme/Kconfig"
-
source "drivers/staging/sep/Kconfig"
source "drivers/staging/iio/Kconfig"
+++ /dev/null
-#
-# VME configuration.
-#
-
-menuconfig VME_BUS
- tristate "VME bridge support"
- depends on PCI
- ---help---
-	  If you say Y here you get support for the VME bridge framework.
-
-if VME_BUS
-
-source "drivers/staging/vme/bridges/Kconfig"
-
-source "drivers/staging/vme/devices/Kconfig"
-
-source "drivers/staging/vme/boards/Kconfig"
-
-endif # VME_BUS
-#
-# Makefile for the VME bridge device drivers.
-#
-obj-$(CONFIG_VME_BUS) += vme.o
-
-obj-y += bridges/
obj-y += devices/
-obj-y += boards/
+++ /dev/null
- TODO
- ====
-
-- Add one or more device drivers which use the VME framework.
-
+++ /dev/null
-comment "VME Board Drivers"
-
-config VMIVME_7805
- tristate "VMIVME-7805"
- help
- If you say Y here you get support for the VMIVME-7805 board.
- This board has an additional control interface to the Universe II
-	  chip. This driver has to be included if you want to access the
-	  VME bus with a VMIVME-7805 board.
+++ /dev/null
-#
-# Makefile for the VME board drivers.
-#
-
-obj-$(CONFIG_VMIVME_7805) += vme_vmivme7805.o
+++ /dev/null
-/*
- * Support for the VMIVME-7805 board access to the Universe II bridge.
- *
- * Author: Arthur Benilov <arthur.benilov@iba-group.com>
- * Copyright 2010 Ion Beam Application, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/io.h>
-
-#include "vme_vmivme7805.h"
-
-static int __init vmic_init(void);
-static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
-static void vmic_remove(struct pci_dev *);
-static void __exit vmic_exit(void);
-
-/* Base address used to access the FPGA registers */
-static void *vmic_base;
-
-static const char driver_name[] = "vmivme_7805";
-
-static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
- { PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
- { },
-};
-
-static struct pci_driver vmic_driver = {
- .name = driver_name,
- .id_table = vmic_ids,
- .probe = vmic_probe,
- .remove = vmic_remove,
-};
-
-static int __init vmic_init(void)
-{
- return pci_register_driver(&vmic_driver);
-}
-
-static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- int retval;
- u32 data;
-
- /* Enable the device */
- retval = pci_enable_device(pdev);
- if (retval) {
- dev_err(&pdev->dev, "Unable to enable device\n");
- goto err;
- }
-
- /* Map Registers */
- retval = pci_request_regions(pdev, driver_name);
- if (retval) {
- dev_err(&pdev->dev, "Unable to reserve resources\n");
- goto err_resource;
- }
-
- /* Map registers in BAR 0 */
- vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
- if (!vmic_base) {
- dev_err(&pdev->dev, "Unable to remap CRG region\n");
- retval = -EIO;
- goto err_remap;
- }
-
- /* Clear the FPGA VME IF contents */
- iowrite32(0, vmic_base + VME_CONTROL);
-
- /* Clear any initial BERR */
- data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
- data |= BM_VME_CONTROL_BERRST;
- iowrite32(data, vmic_base + VME_CONTROL);
-
- /* Enable the vme interface and byte swapping */
- data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
- data = data | BM_VME_CONTROL_MASTER_ENDIAN |
- BM_VME_CONTROL_SLAVE_ENDIAN |
- BM_VME_CONTROL_ABLE |
- BM_VME_CONTROL_BERRI |
- BM_VME_CONTROL_BPENA |
- BM_VME_CONTROL_VBENA;
- iowrite32(data, vmic_base + VME_CONTROL);
-
- return 0;
-
-err_remap:
- pci_release_regions(pdev);
-err_resource:
- pci_disable_device(pdev);
-err:
- return retval;
-}
-
-static void vmic_remove(struct pci_dev *pdev)
-{
- iounmap(vmic_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
-}
-
-static void __exit vmic_exit(void)
-{
- pci_unregister_driver(&vmic_driver);
-}
-
-MODULE_DESCRIPTION("VMIVME-7805 board support driver");
-MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
-MODULE_LICENSE("GPL");
-
-module_init(vmic_init);
-module_exit(vmic_exit);
-
+++ /dev/null
-/*
- * vmivme_7805.h
- *
- * Support for the VMIVME-7805 board access to the Universe II bridge.
- *
- * Author: Arthur Benilov <arthur.benilov@iba-group.com>
- * Copyright 2010 Ion Beam Application, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-
-#ifndef _VMIVME_7805_H
-#define _VMIVME_7805_H
-
-#ifndef PCI_VENDOR_ID_VMIC
-#define PCI_VENDOR_ID_VMIC 0x114A
-#endif
-
-#ifndef PCI_DEVICE_ID_VTIMR
-#define PCI_DEVICE_ID_VTIMR 0x0004
-#endif
-
-#define VME_CONTROL 0x0000
-#define BM_VME_CONTROL_MASTER_ENDIAN 0x0001
-#define BM_VME_CONTROL_SLAVE_ENDIAN 0x0002
-#define BM_VME_CONTROL_ABLE 0x0004
-#define BM_VME_CONTROL_BERRI 0x0040
-#define BM_VME_CONTROL_BERRST 0x0080
-#define BM_VME_CONTROL_BPENA 0x0400
-#define BM_VME_CONTROL_VBENA 0x0800
-
-#endif /* _VMIVME_7805_H */
-
+++ /dev/null
-comment "VME Bridge Drivers"
-
-config VME_CA91CX42
- tristate "Universe II"
- depends on VIRT_TO_BUS
- help
- If you say Y here you get support for the Tundra CA91C142
- (Universe II) VME bridge chip.
-
-config VME_TSI148
- tristate "Tempe"
- depends on VIRT_TO_BUS
- help
- If you say Y here you get support for the Tundra TSI148 VME bridge
- chip.
+++ /dev/null
-obj-$(CONFIG_VME_CA91CX42) += vme_ca91cx42.o
-obj-$(CONFIG_VME_TSI148) += vme_tsi148.o
+++ /dev/null
-/*
- * Support for the Tundra Universe I/II VME-PCI Bridge Chips
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * Based on work by Tom Armistead and Ajit Prem
- * Copyright 2004 Motorola Inc.
- *
- * Derived from ca91c042.c by Michael Wyrick
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/poll.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include "../vme.h"
-#include "../vme_bridge.h"
-#include "vme_ca91cx42.h"
-
-static int __init ca91cx42_init(void);
-static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
-static void ca91cx42_remove(struct pci_dev *);
-static void __exit ca91cx42_exit(void);
-
-/* Module parameters */
-static int geoid;
-
-static const char driver_name[] = "vme_ca91cx42";
-
-static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
- { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
- { },
-};
-
-static struct pci_driver ca91cx42_driver = {
- .name = driver_name,
- .id_table = ca91cx42_ids,
- .probe = ca91cx42_probe,
- .remove = ca91cx42_remove,
-};
-
-static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
-{
- wake_up(&bridge->dma_queue);
-
- return CA91CX42_LINT_DMA;
-}
-
-static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
-{
- int i;
- u32 serviced = 0;
-
- for (i = 0; i < 4; i++) {
- if (stat & CA91CX42_LINT_LM[i]) {
- /* We only enable interrupts if the callback is set */
- bridge->lm_callback[i](i);
- serviced |= CA91CX42_LINT_LM[i];
- }
- }
-
- return serviced;
-}
-
-/* XXX This needs to be split into 4 queues */
-static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
-{
- wake_up(&bridge->mbox_queue);
-
- return CA91CX42_LINT_MBOX;
-}
-
-static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
-{
- wake_up(&bridge->iack_queue);
-
- return CA91CX42_LINT_SW_IACK;
-}
-
-static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
-{
- int val;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- val = ioread32(bridge->base + DGCS);
-
- if (!(val & 0x00000800)) {
- dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
- "Read Error DGCS=%08X\n", val);
- }
-
- return CA91CX42_LINT_VERR;
-}
-
-static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
-{
- int val;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- val = ioread32(bridge->base + DGCS);
-
- if (!(val & 0x00000800))
- dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
- "Read Error DGCS=%08X\n", val);
-
- return CA91CX42_LINT_LERR;
-}
-
-
-static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
- int stat)
-{
- int vec, i, serviced = 0;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
-
- for (i = 7; i > 0; i--) {
- if (stat & (1 << i)) {
- vec = ioread32(bridge->base +
- CA91CX42_V_STATID[i]) & 0xff;
-
- vme_irq_handler(ca91cx42_bridge, i, vec);
-
- serviced |= (1 << i);
- }
- }
-
- return serviced;
-}
-
-static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
-{
- u32 stat, enable, serviced = 0;
- struct vme_bridge *ca91cx42_bridge;
- struct ca91cx42_driver *bridge;
-
- ca91cx42_bridge = ptr;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- enable = ioread32(bridge->base + LINT_EN);
- stat = ioread32(bridge->base + LINT_STAT);
-
- /* Only look at unmasked interrupts */
- stat &= enable;
-
- if (unlikely(!stat))
- return IRQ_NONE;
-
- if (stat & CA91CX42_LINT_DMA)
- serviced |= ca91cx42_DMA_irqhandler(bridge);
- if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
- CA91CX42_LINT_LM3))
- serviced |= ca91cx42_LM_irqhandler(bridge, stat);
- if (stat & CA91CX42_LINT_MBOX)
- serviced |= ca91cx42_MB_irqhandler(bridge, stat);
- if (stat & CA91CX42_LINT_SW_IACK)
- serviced |= ca91cx42_IACK_irqhandler(bridge);
- if (stat & CA91CX42_LINT_VERR)
- serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
- if (stat & CA91CX42_LINT_LERR)
- serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
- if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
- CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
- CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
- CA91CX42_LINT_VIRQ7))
- serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
-
- /* Clear serviced interrupts */
- iowrite32(serviced, bridge->base + LINT_STAT);
-
- return IRQ_HANDLED;
-}
-
-static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
-{
- int result, tmp;
- struct pci_dev *pdev;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- /* Need pdev */
- pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
-
- /* Initialise list for VME bus errors */
- INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);
-
- mutex_init(&ca91cx42_bridge->irq_mtx);
-
- /* Disable interrupts from PCI to VME */
- iowrite32(0, bridge->base + VINT_EN);
-
- /* Disable PCI interrupts */
- iowrite32(0, bridge->base + LINT_EN);
- /* Clear Any Pending PCI Interrupts */
- iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
-
- result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
- driver_name, ca91cx42_bridge);
- if (result) {
- dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
- pdev->irq);
- return result;
- }
-
- /* Ensure all interrupts are mapped to PCI Interrupt 0 */
- iowrite32(0, bridge->base + LINT_MAP0);
- iowrite32(0, bridge->base + LINT_MAP1);
- iowrite32(0, bridge->base + LINT_MAP2);
-
- /* Enable DMA, mailbox & LM Interrupts */
- tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
- CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
- CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;
-
- iowrite32(tmp, bridge->base + LINT_EN);
-
- return 0;
-}
-
-static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
- struct pci_dev *pdev)
-{
- /* Disable interrupts from PCI to VME */
- iowrite32(0, bridge->base + VINT_EN);
-
- /* Disable PCI interrupts */
- iowrite32(0, bridge->base + LINT_EN);
- /* Clear Any Pending PCI Interrupts */
- iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
-
- free_irq(pdev->irq, pdev);
-}
-
-static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
-{
- u32 tmp;
-
- tmp = ioread32(bridge->base + LINT_STAT);
-
- if (tmp & (1 << level))
- return 0;
- else
- return 1;
-}
-
-/*
- * Set up a VME interrupt
- */
-static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
- int state, int sync)
-
-{
- struct pci_dev *pdev;
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- /* Enable IRQ level */
- tmp = ioread32(bridge->base + LINT_EN);
-
- if (state == 0)
- tmp &= ~CA91CX42_LINT_VIRQ[level];
- else
- tmp |= CA91CX42_LINT_VIRQ[level];
-
- iowrite32(tmp, bridge->base + LINT_EN);
-
- if ((state == 0) && (sync != 0)) {
- pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
- dev);
-
- synchronize_irq(pdev->irq);
- }
-}
-
-static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
- int statid)
-{
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- /* Universe can only generate even vectors */
- if (statid & 1)
- return -EINVAL;
-
- mutex_lock(&bridge->vme_int);
-
- tmp = ioread32(bridge->base + VINT_EN);
-
- /* Set Status/ID */
- iowrite32(statid << 24, bridge->base + STATID);
-
- /* Assert VMEbus IRQ */
- tmp = tmp | (1 << (level + 24));
- iowrite32(tmp, bridge->base + VINT_EN);
-
- /* Wait for IACK */
- wait_event_interruptible(bridge->iack_queue,
- ca91cx42_iack_received(bridge, level));
-
- /* Return interrupt to low state */
- tmp = ioread32(bridge->base + VINT_EN);
- tmp = tmp & ~(1 << (level + 24));
- iowrite32(tmp, bridge->base + VINT_EN);
-
- mutex_unlock(&bridge->vme_int);
-
- return 0;
-}
-
-static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, u32 aspace, u32 cycle)
-{
- unsigned int i, addr = 0, granularity;
- unsigned int temp_ctl = 0;
- unsigned int vme_bound, pci_offset;
- struct vme_bridge *ca91cx42_bridge;
- struct ca91cx42_driver *bridge;
-
- ca91cx42_bridge = image->parent;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- i = image->number;
-
- switch (aspace) {
- case VME_A16:
- addr |= CA91CX42_VSI_CTL_VAS_A16;
- break;
- case VME_A24:
- addr |= CA91CX42_VSI_CTL_VAS_A24;
- break;
- case VME_A32:
- addr |= CA91CX42_VSI_CTL_VAS_A32;
- break;
- case VME_USER1:
- addr |= CA91CX42_VSI_CTL_VAS_USER1;
- break;
- case VME_USER2:
- addr |= CA91CX42_VSI_CTL_VAS_USER2;
- break;
- case VME_A64:
- case VME_CRCSR:
- case VME_USER3:
- case VME_USER4:
- default:
- dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- /*
- * Bound address is a valid address for the window, adjust
- * accordingly
- */
- vme_bound = vme_base + size;
- pci_offset = pci_base - vme_base;
-
- if ((i == 0) || (i == 4))
- granularity = 0x1000;
- else
- granularity = 0x10000;
-
- if (vme_base & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid VME base "
- "alignment\n");
- return -EINVAL;
- }
- if (vme_bound & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
- "alignment\n");
- return -EINVAL;
- }
- if (pci_offset & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
- "alignment\n");
- return -EINVAL;
- }
-
- /* Disable while we are mucking around */
- temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
- temp_ctl &= ~CA91CX42_VSI_CTL_EN;
- iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
-
- /* Setup mapping */
- iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
- iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
- iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
-
- /* Setup address space */
- temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
- temp_ctl |= addr;
-
- /* Setup cycle types */
- temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
- if (cycle & VME_SUPER)
- temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
- if (cycle & VME_USER)
- temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
- if (cycle & VME_PROG)
- temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
- if (cycle & VME_DATA)
- temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
-
- /* Write ctl reg without enable */
- iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
-
- if (enabled)
- temp_ctl |= CA91CX42_VSI_CTL_EN;
-
- iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
-
- return 0;
-}
-
-static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
-{
- unsigned int i, granularity = 0, ctl = 0;
- unsigned long long vme_bound, pci_offset;
- struct ca91cx42_driver *bridge;
-
- bridge = image->parent->driver_priv;
-
- i = image->number;
-
- if ((i == 0) || (i == 4))
- granularity = 0x1000;
- else
- granularity = 0x10000;
-
- /* Read Registers */
- ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
-
- *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
- vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
- pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
-
-	*pci_base = (dma_addr_t)*vme_base + pci_offset;
- *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
-
- *enabled = 0;
- *aspace = 0;
- *cycle = 0;
-
- if (ctl & CA91CX42_VSI_CTL_EN)
- *enabled = 1;
-
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
- *aspace = VME_A16;
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
- *aspace = VME_A24;
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
- *aspace = VME_A32;
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
- *aspace = VME_USER1;
- if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
- *aspace = VME_USER2;
-
- if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
- *cycle |= VME_SUPER;
- if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
- *cycle |= VME_USER;
- if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
- *cycle |= VME_PROG;
- if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
- *cycle |= VME_DATA;
-
- return 0;
-}
-
-/*
- * Allocate and map PCI Resource
- */
-static int ca91cx42_alloc_resource(struct vme_master_resource *image,
- unsigned long long size)
-{
- unsigned long long existing_size;
- int retval = 0;
- struct pci_dev *pdev;
- struct vme_bridge *ca91cx42_bridge;
-
- ca91cx42_bridge = image->parent;
-
- /* Find pci_dev container of dev */
- if (ca91cx42_bridge->parent == NULL) {
- dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
- return -EINVAL;
- }
- pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
-
- existing_size = (unsigned long long)(image->bus_resource.end -
- image->bus_resource.start);
-
- /* If the existing size is OK, return */
- if (existing_size == (size - 1))
- return 0;
-
- if (existing_size != 0) {
- iounmap(image->kern_base);
- image->kern_base = NULL;
- kfree(image->bus_resource.name);
- release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(struct resource));
- }
-
- if (image->bus_resource.name == NULL) {
- image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (image->bus_resource.name == NULL) {
- dev_err(ca91cx42_bridge->parent, "Unable to allocate "
- "memory for resource name\n");
- retval = -ENOMEM;
- goto err_name;
- }
- }
-
- sprintf((char *)image->bus_resource.name, "%s.%d",
- ca91cx42_bridge->name, image->number);
-
- image->bus_resource.start = 0;
- image->bus_resource.end = (unsigned long)size;
- image->bus_resource.flags = IORESOURCE_MEM;
-
- retval = pci_bus_alloc_resource(pdev->bus,
- &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
- 0, NULL, NULL);
- if (retval) {
- dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
- "resource for window %d size 0x%lx start 0x%lx\n",
- image->number, (unsigned long)size,
- (unsigned long)image->bus_resource.start);
- goto err_resource;
- }
-
- image->kern_base = ioremap_nocache(
- image->bus_resource.start, size);
- if (image->kern_base == NULL) {
- dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
- retval = -ENOMEM;
- goto err_remap;
- }
-
- return 0;
-
-err_remap:
- release_resource(&image->bus_resource);
-err_resource:
- kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
-err_name:
- return retval;
-}
-
-/*
- * Free and unmap PCI Resource
- */
-static void ca91cx42_free_resource(struct vme_master_resource *image)
-{
- iounmap(image->kern_base);
- image->kern_base = NULL;
- release_resource(&image->bus_resource);
- kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
-}
-
-
-static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size, u32 aspace,
- u32 cycle, u32 dwidth)
-{
- int retval = 0;
- unsigned int i, granularity = 0;
- unsigned int temp_ctl = 0;
- unsigned long long pci_bound, vme_offset, pci_base;
- struct vme_bridge *ca91cx42_bridge;
- struct ca91cx42_driver *bridge;
-
- ca91cx42_bridge = image->parent;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- i = image->number;
-
- if ((i == 0) || (i == 4))
- granularity = 0x1000;
- else
- granularity = 0x10000;
-
- /* Verify input data */
- if (vme_base & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
- "alignment\n");
- retval = -EINVAL;
- goto err_window;
- }
- if (size & (granularity - 1)) {
- dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
- "alignment\n");
- retval = -EINVAL;
- goto err_window;
- }
-
- spin_lock(&image->lock);
-
- /*
- * Let's allocate the resource here rather than further up the stack as
- * it avoids pushing loads of bus dependent stuff up the stack
- */
- retval = ca91cx42_alloc_resource(image, size);
- if (retval) {
- spin_unlock(&image->lock);
- dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
- "for resource name\n");
- retval = -ENOMEM;
- goto err_res;
- }
-
- pci_base = (unsigned long long)image->bus_resource.start;
-
- /*
- * Bound address is a valid address for the window, adjust
- * according to window granularity.
- */
- pci_bound = pci_base + size;
- vme_offset = vme_base - pci_base;
-
- /* Disable while we are mucking around */
- temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
- temp_ctl &= ~CA91CX42_LSI_CTL_EN;
- iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
-
- /* Setup cycle types */
- temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
- if (cycle & VME_BLT)
- temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
- if (cycle & VME_MBLT)
- temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
-
- /* Setup data width */
- temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
- switch (dwidth) {
- case VME_D8:
- temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
- break;
- case VME_D16:
- temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
- break;
- case VME_D32:
- temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
- break;
- case VME_D64:
- temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
- break;
- default:
- spin_unlock(&image->lock);
- dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
- retval = -EINVAL;
- goto err_dwidth;
- break;
- }
-
- /* Setup address space */
- temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
- switch (aspace) {
- case VME_A16:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
- break;
- case VME_A24:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
- break;
- case VME_A32:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
- break;
- case VME_CRCSR:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
- break;
- case VME_USER1:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
- break;
- case VME_USER2:
- temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
- break;
- case VME_A64:
- case VME_USER3:
- case VME_USER4:
- default:
- spin_unlock(&image->lock);
- dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
- retval = -EINVAL;
- goto err_aspace;
- break;
- }
-
- temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
- if (cycle & VME_SUPER)
- temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
- if (cycle & VME_PROG)
- temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
-
- /* Setup mapping */
- iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
- iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
- iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
-
- /* Write ctl reg without enable */
- iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
-
- if (enabled)
- temp_ctl |= CA91CX42_LSI_CTL_EN;
-
- iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
-
- spin_unlock(&image->lock);
- return 0;
-
-err_aspace:
-err_dwidth:
- ca91cx42_free_resource(image);
-err_res:
-err_window:
- return retval;
-}
-
-static int __ca91cx42_master_get(struct vme_master_resource *image,
- int *enabled, unsigned long long *vme_base, unsigned long long *size,
- u32 *aspace, u32 *cycle, u32 *dwidth)
-{
- unsigned int i, ctl;
- unsigned long long pci_base, pci_bound, vme_offset;
- struct ca91cx42_driver *bridge;
-
- bridge = image->parent->driver_priv;
-
- i = image->number;
-
- ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
-
- pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
- vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
- pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
-
- *vme_base = pci_base + vme_offset;
- *size = (unsigned long long)(pci_bound - pci_base);
-
- *enabled = 0;
- *aspace = 0;
- *cycle = 0;
- *dwidth = 0;
-
- if (ctl & CA91CX42_LSI_CTL_EN)
- *enabled = 1;
-
- /* Setup address space */
- switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
- case CA91CX42_LSI_CTL_VAS_A16:
- *aspace = VME_A16;
- break;
- case CA91CX42_LSI_CTL_VAS_A24:
- *aspace = VME_A24;
- break;
- case CA91CX42_LSI_CTL_VAS_A32:
- *aspace = VME_A32;
- break;
- case CA91CX42_LSI_CTL_VAS_CRCSR:
- *aspace = VME_CRCSR;
- break;
- case CA91CX42_LSI_CTL_VAS_USER1:
- *aspace = VME_USER1;
- break;
- case CA91CX42_LSI_CTL_VAS_USER2:
- *aspace = VME_USER2;
- break;
- }
-
-	/* XXX Not sure how to check for MBLT */
- /* Setup cycle types */
- if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
- *cycle |= VME_BLT;
- else
- *cycle |= VME_SCT;
-
- if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
- *cycle |= VME_SUPER;
- else
- *cycle |= VME_USER;
-
-	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
-		*cycle |= VME_PROG;
-	else
-		*cycle |= VME_DATA;
-
- /* Setup data width */
- switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
- case CA91CX42_LSI_CTL_VDW_D8:
- *dwidth = VME_D8;
- break;
- case CA91CX42_LSI_CTL_VDW_D16:
- *dwidth = VME_D16;
- break;
- case CA91CX42_LSI_CTL_VDW_D32:
- *dwidth = VME_D32;
- break;
- case CA91CX42_LSI_CTL_VDW_D64:
- *dwidth = VME_D64;
- break;
- }
-
- return 0;
-}
-
-static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
-{
- int retval;
-
- spin_lock(&image->lock);
-
- retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
- cycle, dwidth);
-
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
- void *buf, size_t count, loff_t offset)
-{
- ssize_t retval;
- void *addr = image->kern_base + offset;
- unsigned int done = 0;
- unsigned int count32;
-
- if (count == 0)
- return 0;
-
- spin_lock(&image->lock);
-
-	/* The following code handles VME address alignment in order to
-	 * assure the maximal data width cycle. We cannot use memcpy_xxx
-	 * directly here because it may split the transfer into 8-bit
-	 * cycles, making a D16 cycle impossible. On the other hand, the
-	 * bridge itself ensures that the maximal configured data width
-	 * is used and splits non-aligned accesses automatically.
-	 */
- if ((uintptr_t)addr & 0x1) {
- *(u8 *)buf = ioread8(addr);
- done += 1;
- if (done == count)
- goto out;
- }
- if ((uintptr_t)addr & 0x2) {
- if ((count - done) < 2) {
- *(u8 *)(buf + done) = ioread8(addr + done);
- done += 1;
- goto out;
- } else {
- *(u16 *)(buf + done) = ioread16(addr + done);
- done += 2;
- }
- }
-
- count32 = (count - done) & ~0x3;
- if (count32 > 0) {
-		memcpy_fromio(buf + done, addr + done, count32);
- done += count32;
- }
-
- if ((count - done) & 0x2) {
- *(u16 *)(buf + done) = ioread16(addr + done);
- done += 2;
- }
- if ((count - done) & 0x1) {
- *(u8 *)(buf + done) = ioread8(addr + done);
- done += 1;
- }
-out:
- retval = count;
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
- void *buf, size_t count, loff_t offset)
-{
- ssize_t retval;
- void *addr = image->kern_base + offset;
- unsigned int done = 0;
- unsigned int count32;
-
- if (count == 0)
- return 0;
-
- spin_lock(&image->lock);
-
-	/* Apply the same strategy as in master_read in order to
-	 * assure a D16 cycle when required.
-	 */
- if ((uintptr_t)addr & 0x1) {
- iowrite8(*(u8 *)buf, addr);
- done += 1;
- if (done == count)
- goto out;
- }
- if ((uintptr_t)addr & 0x2) {
- if ((count - done) < 2) {
- iowrite8(*(u8 *)(buf + done), addr + done);
- done += 1;
- goto out;
- } else {
- iowrite16(*(u16 *)(buf + done), addr + done);
- done += 2;
- }
- }
-
- count32 = (count - done) & ~0x3;
- if (count32 > 0) {
- memcpy_toio(addr + done, buf + done, count32);
- done += count32;
- }
-
- if ((count - done) & 0x2) {
- iowrite16(*(u16 *)(buf + done), addr + done);
- done += 2;
- }
- if ((count - done) & 0x1) {
- iowrite8(*(u8 *)(buf + done), addr + done);
- done += 1;
- }
-out:
- retval = count;
-
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
- unsigned int mask, unsigned int compare, unsigned int swap,
- loff_t offset)
-{
- u32 result;
- uintptr_t pci_addr;
- int i;
- struct ca91cx42_driver *bridge;
- struct device *dev;
-
- bridge = image->parent->driver_priv;
- dev = image->parent->parent;
-
- /* Find the PCI address that maps to the desired VME address */
- i = image->number;
-
- /* Locking as we can only do one of these at a time */
- mutex_lock(&bridge->vme_rmw);
-
- /* Lock image */
- spin_lock(&image->lock);
-
- pci_addr = (uintptr_t)image->kern_base + offset;
-
- /* Address must be 4-byte aligned */
- if (pci_addr & 0x3) {
- dev_err(dev, "RMW Address not 4-byte aligned\n");
- result = -EINVAL;
- goto out;
- }
-
- /* Ensure RMW Disabled whilst configuring */
- iowrite32(0, bridge->base + SCYC_CTL);
-
- /* Configure registers */
- iowrite32(mask, bridge->base + SCYC_EN);
- iowrite32(compare, bridge->base + SCYC_CMP);
- iowrite32(swap, bridge->base + SCYC_SWP);
- iowrite32(pci_addr, bridge->base + SCYC_ADDR);
-
- /* Enable RMW */
- iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);
-
- /* Kick process off with a read to the required address. */
- result = ioread32(image->kern_base + offset);
-
- /* Disable RMW */
- iowrite32(0, bridge->base + SCYC_CTL);
-
-out:
- spin_unlock(&image->lock);
-
- mutex_unlock(&bridge->vme_rmw);
-
- return result;
-}
-
-static int ca91cx42_dma_list_add(struct vme_dma_list *list,
- struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
-{
- struct ca91cx42_dma_entry *entry, *prev;
- struct vme_dma_pci *pci_attr;
- struct vme_dma_vme *vme_attr;
- dma_addr_t desc_ptr;
- int retval = 0;
- struct device *dev;
-
- dev = list->parent->parent->parent;
-
- /* XXX descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
- if (entry == NULL) {
- dev_err(dev, "Failed to allocate memory for dma resource "
- "structure\n");
- retval = -ENOMEM;
- goto err_mem;
- }
-
- /* Test descriptor alignment */
- if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
- dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
- "required: %p\n", &entry->descriptor);
- retval = -EINVAL;
- goto err_align;
- }
-
- memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
-
- if (dest->type == VME_DMA_VME) {
- entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
- vme_attr = dest->private;
- pci_attr = src->private;
- } else {
- vme_attr = src->private;
- pci_attr = dest->private;
- }
-
-	/* Check that we can fulfill the required attributes */
- if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
- VME_USER2)) != 0) {
-
- dev_err(dev, "Unsupported cycle type\n");
- retval = -EINVAL;
- goto err_aspace;
- }
-
- if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
- VME_PROG | VME_DATA)) != 0) {
-
- dev_err(dev, "Unsupported cycle type\n");
- retval = -EINVAL;
- goto err_cycle;
- }
-
- /* Check to see if we can fulfill source and destination */
- if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
- ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
-
- dev_err(dev, "Cannot perform transfer with this "
- "source-destination combination\n");
- retval = -EINVAL;
- goto err_direct;
- }
-
- /* Setup cycle types */
- if (vme_attr->cycle & VME_BLT)
- entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
-
- /* Setup data width */
- switch (vme_attr->dwidth) {
- case VME_D8:
- entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
- break;
- case VME_D16:
- entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
- break;
- case VME_D32:
- entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
- break;
- case VME_D64:
- entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
- break;
- default:
- dev_err(dev, "Invalid data width\n");
- return -EINVAL;
- }
-
- /* Setup address space */
- switch (vme_attr->aspace) {
- case VME_A16:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
- break;
- case VME_A24:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
- break;
- case VME_A32:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
- break;
- case VME_USER1:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
- break;
- case VME_USER2:
- entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
- break;
- default:
- dev_err(dev, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- if (vme_attr->cycle & VME_SUPER)
- entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
- if (vme_attr->cycle & VME_PROG)
- entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
-
- entry->descriptor.dtbc = count;
- entry->descriptor.dla = pci_attr->address;
- entry->descriptor.dva = vme_attr->address;
- entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
-
- /* Add to list */
- list_add_tail(&entry->list, &list->entries);
-
- /* Fill out previous descriptors "Next Address" */
- if (entry->list.prev != &list->entries) {
- prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
- list);
- /* We need the bus address for the pointer */
- desc_ptr = virt_to_bus(&entry->descriptor);
- prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
- }
-
- return 0;
-
-err_cycle:
-err_aspace:
-err_direct:
-err_align:
- kfree(entry);
-err_mem:
- return retval;
-}
-
-static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
-{
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- tmp = ioread32(bridge->base + DGCS);
-
- if (tmp & CA91CX42_DGCS_ACT)
- return 0;
- else
- return 1;
-}
-
-static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
-{
- struct vme_dma_resource *ctrlr;
- struct ca91cx42_dma_entry *entry;
- int retval = 0;
- dma_addr_t bus_addr;
- u32 val;
- struct device *dev;
- struct ca91cx42_driver *bridge;
-
- ctrlr = list->parent;
-
- bridge = ctrlr->parent->driver_priv;
- dev = ctrlr->parent->parent;
-
- mutex_lock(&ctrlr->mtx);
-
- if (!(list_empty(&ctrlr->running))) {
- /*
- * XXX We have an active DMA transfer and currently haven't
- * sorted out the mechanism for "pending" DMA transfers.
- * Return busy.
- */
- /* Need to add to pending here */
- mutex_unlock(&ctrlr->mtx);
- return -EBUSY;
- } else {
- list_add(&list->list, &ctrlr->running);
- }
-
- /* Get first bus address and write into registers */
- entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
- list);
-
- bus_addr = virt_to_bus(&entry->descriptor);
-
- mutex_unlock(&ctrlr->mtx);
-
- iowrite32(0, bridge->base + DTBC);
- iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
-
- /* Start the operation */
- val = ioread32(bridge->base + DGCS);
-
- /* XXX Could set VMEbus On and Off Counters here */
- val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
-
- val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
- CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
- CA91CX42_DGCS_PERR);
-
- iowrite32(val, bridge->base + DGCS);
-
- val |= CA91CX42_DGCS_GO;
-
- iowrite32(val, bridge->base + DGCS);
-
- wait_event_interruptible(bridge->dma_queue,
- ca91cx42_dma_busy(ctrlr->parent));
-
- /*
-	 * Read the status register; it remains valid until we kick off
-	 * a new transfer.
- */
- val = ioread32(bridge->base + DGCS);
-
- if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
- CA91CX42_DGCS_PERR)) {
-
- dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
- val = ioread32(bridge->base + DCTL);
- }
-
- /* Remove list from running list */
- mutex_lock(&ctrlr->mtx);
- list_del(&list->list);
- mutex_unlock(&ctrlr->mtx);
-
- return retval;
-
-}
-
-static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
-{
- struct list_head *pos, *temp;
- struct ca91cx42_dma_entry *entry;
-
- /* detach and free each entry */
- list_for_each_safe(pos, temp, &list->entries) {
- list_del(pos);
- entry = list_entry(pos, struct ca91cx42_dma_entry, list);
- kfree(entry);
- }
-
- return 0;
-}
-
-/*
- * All 4 location monitors reside at the same base - this is therefore a
- * system wide configuration.
- *
- * This does not enable the LM monitor - that should be done when the first
- * callback is attached and disabled when the last callback is removed.
- */
-static int ca91cx42_lm_set(struct vme_lm_resource *lm,
- unsigned long long lm_base, u32 aspace, u32 cycle)
-{
- u32 temp_base, lm_ctl = 0;
- int i;
- struct ca91cx42_driver *bridge;
- struct device *dev;
-
- bridge = lm->parent->driver_priv;
- dev = lm->parent->parent;
-
- /* Check the alignment of the location monitor */
- temp_base = (u32)lm_base;
- if (temp_base & 0xffff) {
- dev_err(dev, "Location monitor must be aligned to 64KB "
- "boundary");
- return -EINVAL;
- }
-
- mutex_lock(&lm->mtx);
-
- /* If we already have a callback attached, we can't move it! */
- for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
- mutex_unlock(&lm->mtx);
- dev_err(dev, "Location monitor callback attached, "
- "can't reset\n");
- return -EBUSY;
- }
- }
-
- switch (aspace) {
- case VME_A16:
- lm_ctl |= CA91CX42_LM_CTL_AS_A16;
- break;
- case VME_A24:
- lm_ctl |= CA91CX42_LM_CTL_AS_A24;
- break;
- case VME_A32:
- lm_ctl |= CA91CX42_LM_CTL_AS_A32;
- break;
- default:
- mutex_unlock(&lm->mtx);
- dev_err(dev, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- if (cycle & VME_SUPER)
- lm_ctl |= CA91CX42_LM_CTL_SUPR;
- if (cycle & VME_USER)
- lm_ctl |= CA91CX42_LM_CTL_NPRIV;
- if (cycle & VME_PROG)
- lm_ctl |= CA91CX42_LM_CTL_PGM;
- if (cycle & VME_DATA)
- lm_ctl |= CA91CX42_LM_CTL_DATA;
-
- iowrite32(lm_base, bridge->base + LM_BS);
- iowrite32(lm_ctl, bridge->base + LM_CTL);
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
-
-/* Get the configuration of the location monitor and return whether it is
- * enabled or disabled.
- */
-static int ca91cx42_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, u32 *aspace, u32 *cycle)
-{
- u32 lm_ctl, enabled = 0;
- struct ca91cx42_driver *bridge;
-
- bridge = lm->parent->driver_priv;
-
- mutex_lock(&lm->mtx);
-
- *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
- lm_ctl = ioread32(bridge->base + LM_CTL);
-
- if (lm_ctl & CA91CX42_LM_CTL_EN)
- enabled = 1;
-
- if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
- *aspace = VME_A16;
- if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
- *aspace = VME_A24;
- if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
- *aspace = VME_A32;
-
- *cycle = 0;
- if (lm_ctl & CA91CX42_LM_CTL_SUPR)
- *cycle |= VME_SUPER;
- if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
- *cycle |= VME_USER;
- if (lm_ctl & CA91CX42_LM_CTL_PGM)
- *cycle |= VME_PROG;
- if (lm_ctl & CA91CX42_LM_CTL_DATA)
- *cycle |= VME_DATA;
-
- mutex_unlock(&lm->mtx);
-
- return enabled;
-}
-
-/*
- * Attach a callback to a specific location monitor.
- *
- * Callback will be passed the monitor triggered.
- */
-static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
- void (*callback)(int))
-{
- u32 lm_ctl, tmp;
- struct ca91cx42_driver *bridge;
- struct device *dev;
-
- bridge = lm->parent->driver_priv;
- dev = lm->parent->parent;
-
- mutex_lock(&lm->mtx);
-
- /* Ensure that the location monitor is configured - need PGM or DATA */
- lm_ctl = ioread32(bridge->base + LM_CTL);
- if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
- mutex_unlock(&lm->mtx);
- dev_err(dev, "Location monitor not properly configured\n");
- return -EINVAL;
- }
-
- /* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
- mutex_unlock(&lm->mtx);
- dev_err(dev, "Existing callback attached\n");
- return -EBUSY;
- }
-
- /* Attach callback */
- bridge->lm_callback[monitor] = callback;
-
- /* Enable Location Monitor interrupt */
- tmp = ioread32(bridge->base + LINT_EN);
- tmp |= CA91CX42_LINT_LM[monitor];
- iowrite32(tmp, bridge->base + LINT_EN);
-
-	/* Ensure that the global Location Monitor Enable bit is set */
- if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
- lm_ctl |= CA91CX42_LM_CTL_EN;
- iowrite32(lm_ctl, bridge->base + LM_CTL);
- }
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
-
-/*
- * Detach a callback function from a specific location monitor.
- */
-static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
-{
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = lm->parent->driver_priv;
-
- mutex_lock(&lm->mtx);
-
- /* Disable Location Monitor and ensure previous interrupts are clear */
- tmp = ioread32(bridge->base + LINT_EN);
- tmp &= ~CA91CX42_LINT_LM[monitor];
- iowrite32(tmp, bridge->base + LINT_EN);
-
- iowrite32(CA91CX42_LINT_LM[monitor],
- bridge->base + LINT_STAT);
-
- /* Detach callback */
- bridge->lm_callback[monitor] = NULL;
-
- /* If all location monitors disabled, disable global Location Monitor */
- if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
- CA91CX42_LINT_LM3)) == 0) {
- tmp = ioread32(bridge->base + LM_CTL);
- tmp &= ~CA91CX42_LM_CTL_EN;
- iowrite32(tmp, bridge->base + LM_CTL);
- }
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
-
-static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
-{
- u32 slot = 0;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- if (!geoid) {
- slot = ioread32(bridge->base + VCSR_BS);
- slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
- } else
- slot = geoid;
-
- return (int)slot;
-
-}
-
-void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
- dma_addr_t *dma)
-{
- struct pci_dev *pdev;
-
- /* Find pci_dev container of dev */
- pdev = container_of(parent, struct pci_dev, dev);
-
- return pci_alloc_consistent(pdev, size, dma);
-}
-
-void ca91cx42_free_consistent(struct device *parent, size_t size, void *vaddr,
- dma_addr_t dma)
-{
- struct pci_dev *pdev;
-
- /* Find pci_dev container of dev */
- pdev = container_of(parent, struct pci_dev, dev);
-
- pci_free_consistent(pdev, size, vaddr, dma);
-}
-
-static int __init ca91cx42_init(void)
-{
- return pci_register_driver(&ca91cx42_driver);
-}
-
-/*
- * Configure CR/CSR space
- *
- * Access to the CR/CSR can be configured at power-up. The location of the
- * CR/CSR registers in the CR/CSR address space is determined by the board's
- * Auto-ID or geographic address. This function ensures that the window is
- * enabled at an offset consistent with the board's geographic address.
- */
-static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
- struct pci_dev *pdev)
-{
- unsigned int crcsr_addr;
- int tmp, slot;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- slot = ca91cx42_slot_get(ca91cx42_bridge);
-
- /* Write CSR Base Address if slot ID is supplied as a module param */
- if (geoid)
- iowrite32(geoid << 27, bridge->base + VCSR_BS);
-
- dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
- if (slot == 0) {
- dev_err(&pdev->dev, "Slot number is unset, not configuring "
- "CR/CSR space\n");
- return -EINVAL;
- }
-
- /* Allocate mem for CR/CSR image */
- bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
- &bridge->crcsr_bus);
- if (bridge->crcsr_kernel == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
- "image\n");
- return -ENOMEM;
- }
-
- memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
-
- crcsr_addr = slot * (512 * 1024);
- iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
-
- tmp = ioread32(bridge->base + VCSR_CTL);
- tmp |= CA91CX42_VCSR_CTL_EN;
- iowrite32(tmp, bridge->base + VCSR_CTL);
-
- return 0;
-}
-
-static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
- struct pci_dev *pdev)
-{
- u32 tmp;
- struct ca91cx42_driver *bridge;
-
- bridge = ca91cx42_bridge->driver_priv;
-
- /* Turn off CR/CSR space */
- tmp = ioread32(bridge->base + VCSR_CTL);
- tmp &= ~CA91CX42_VCSR_CTL_EN;
- iowrite32(tmp, bridge->base + VCSR_CTL);
-
- /* Free image */
- iowrite32(0, bridge->base + VCSR_TO);
-
- pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
- bridge->crcsr_bus);
-}
-
-static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- int retval, i;
- u32 data;
- struct list_head *pos = NULL;
- struct vme_bridge *ca91cx42_bridge;
- struct ca91cx42_driver *ca91cx42_device;
- struct vme_master_resource *master_image;
- struct vme_slave_resource *slave_image;
- struct vme_dma_resource *dma_ctrlr;
- struct vme_lm_resource *lm;
-
- /* We want to support more than one of each bridge so we need to
- * dynamically allocate the bridge structure
- */
- ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
-
- if (ca91cx42_bridge == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
- retval = -ENOMEM;
- goto err_struct;
- }
-
- ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
-
- if (ca91cx42_device == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
- retval = -ENOMEM;
- goto err_driver;
- }
-
- ca91cx42_bridge->driver_priv = ca91cx42_device;
-
- /* Enable the device */
- retval = pci_enable_device(pdev);
- if (retval) {
- dev_err(&pdev->dev, "Unable to enable device\n");
- goto err_enable;
- }
-
- /* Map Registers */
- retval = pci_request_regions(pdev, driver_name);
- if (retval) {
- dev_err(&pdev->dev, "Unable to reserve resources\n");
- goto err_resource;
- }
-
- /* map registers in BAR 0 */
- ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
- 4096);
- if (!ca91cx42_device->base) {
- dev_err(&pdev->dev, "Unable to remap CRG region\n");
- retval = -EIO;
- goto err_remap;
- }
-
- /* Check to see if the mapping worked out */
- data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
- if (data != PCI_VENDOR_ID_TUNDRA) {
- dev_err(&pdev->dev, "PCI_ID check failed\n");
- retval = -EIO;
- goto err_test;
- }
-
- /* Initialize wait queues & mutual exclusion flags */
- init_waitqueue_head(&ca91cx42_device->dma_queue);
- init_waitqueue_head(&ca91cx42_device->iack_queue);
- mutex_init(&ca91cx42_device->vme_int);
- mutex_init(&ca91cx42_device->vme_rmw);
-
- ca91cx42_bridge->parent = &pdev->dev;
- strcpy(ca91cx42_bridge->name, driver_name);
-
- /* Setup IRQ */
- retval = ca91cx42_irq_init(ca91cx42_bridge);
- if (retval != 0) {
- dev_err(&pdev->dev, "Chip Initialization failed.\n");
- goto err_irq;
- }
-
- /* Add master windows to list */
- INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
- for (i = 0; i < CA91C142_MAX_MASTER; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "master resource structure\n");
- retval = -ENOMEM;
- goto err_master;
- }
- master_image->parent = ca91cx42_bridge;
- spin_lock_init(&master_image->lock);
- master_image->locked = 0;
- master_image->number = i;
- master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
- VME_CRCSR | VME_USER1 | VME_USER2;
- master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
- VME_SUPER | VME_USER | VME_PROG | VME_DATA;
- master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
- memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
- master_image->kern_base = NULL;
- list_add_tail(&master_image->list,
- &ca91cx42_bridge->master_resources);
- }
-
- /* Add slave windows to list */
- INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
- for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "slave resource structure\n");
- retval = -ENOMEM;
- goto err_slave;
- }
- slave_image->parent = ca91cx42_bridge;
- mutex_init(&slave_image->mtx);
- slave_image->locked = 0;
- slave_image->number = i;
- slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
- VME_USER2;
-
- /* Only windows 0 and 4 support A16 */
- if (i == 0 || i == 4)
- slave_image->address_attr |= VME_A16;
-
- slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
- VME_SUPER | VME_USER | VME_PROG | VME_DATA;
- list_add_tail(&slave_image->list,
- &ca91cx42_bridge->slave_resources);
- }
-
- /* Add dma engines to list */
- INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
- for (i = 0; i < CA91C142_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
- GFP_KERNEL);
- if (dma_ctrlr == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "dma resource structure\n");
- retval = -ENOMEM;
- goto err_dma;
- }
- dma_ctrlr->parent = ca91cx42_bridge;
- mutex_init(&dma_ctrlr->mtx);
- dma_ctrlr->locked = 0;
- dma_ctrlr->number = i;
- dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
- VME_DMA_MEM_TO_VME;
- INIT_LIST_HEAD(&dma_ctrlr->pending);
- INIT_LIST_HEAD(&dma_ctrlr->running);
- list_add_tail(&dma_ctrlr->list,
- &ca91cx42_bridge->dma_resources);
- }
-
- /* Add location monitor to list */
- INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "location monitor resource structure\n");
- retval = -ENOMEM;
- goto err_lm;
- }
- lm->parent = ca91cx42_bridge;
- mutex_init(&lm->mtx);
- lm->locked = 0;
- lm->number = 1;
- lm->monitors = 4;
- list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
-
- ca91cx42_bridge->slave_get = ca91cx42_slave_get;
- ca91cx42_bridge->slave_set = ca91cx42_slave_set;
- ca91cx42_bridge->master_get = ca91cx42_master_get;
- ca91cx42_bridge->master_set = ca91cx42_master_set;
- ca91cx42_bridge->master_read = ca91cx42_master_read;
- ca91cx42_bridge->master_write = ca91cx42_master_write;
- ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
- ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
- ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
- ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
- ca91cx42_bridge->irq_set = ca91cx42_irq_set;
- ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
- ca91cx42_bridge->lm_set = ca91cx42_lm_set;
- ca91cx42_bridge->lm_get = ca91cx42_lm_get;
- ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
- ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
- ca91cx42_bridge->slot_get = ca91cx42_slot_get;
- ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
- ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;
-
- data = ioread32(ca91cx42_device->base + MISC_CTL);
- dev_info(&pdev->dev, "Board is%s the VME system controller\n",
- (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
- dev_info(&pdev->dev, "Slot ID is %d\n",
- ca91cx42_slot_get(ca91cx42_bridge));
-
- if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
- dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
-
-	/* The ca91cx42_bridge pointer is retained (via the driver data set
-	 * below) for use in ca91cx42_remove()
-	 */
- retval = vme_register_bridge(ca91cx42_bridge);
- if (retval != 0) {
- dev_err(&pdev->dev, "Chip Registration failed.\n");
- goto err_reg;
- }
-
- pci_set_drvdata(pdev, ca91cx42_bridge);
-
- return 0;
-
-err_reg:
- ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
-err_lm:
- /* resources are stored in link list */
- list_for_each(pos, &ca91cx42_bridge->lm_resources) {
- lm = list_entry(pos, struct vme_lm_resource, list);
- list_del(pos);
- kfree(lm);
- }
-err_dma:
- /* resources are stored in link list */
- list_for_each(pos, &ca91cx42_bridge->dma_resources) {
- dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
- list_del(pos);
- kfree(dma_ctrlr);
- }
-err_slave:
- /* resources are stored in link list */
- list_for_each(pos, &ca91cx42_bridge->slave_resources) {
- slave_image = list_entry(pos, struct vme_slave_resource, list);
- list_del(pos);
- kfree(slave_image);
- }
-err_master:
- /* resources are stored in link list */
- list_for_each(pos, &ca91cx42_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
- list_del(pos);
- kfree(master_image);
- }
-
- ca91cx42_irq_exit(ca91cx42_device, pdev);
-err_irq:
-err_test:
- iounmap(ca91cx42_device->base);
-err_remap:
- pci_release_regions(pdev);
-err_resource:
- pci_disable_device(pdev);
-err_enable:
- kfree(ca91cx42_device);
-err_driver:
- kfree(ca91cx42_bridge);
-err_struct:
- return retval;
-
-}
-
-static void ca91cx42_remove(struct pci_dev *pdev)
-{
- struct list_head *pos = NULL;
- struct vme_master_resource *master_image;
- struct vme_slave_resource *slave_image;
- struct vme_dma_resource *dma_ctrlr;
- struct vme_lm_resource *lm;
- struct ca91cx42_driver *bridge;
- struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
-
- bridge = ca91cx42_bridge->driver_priv;
-
-
- /* Turn off Ints */
- iowrite32(0, bridge->base + LINT_EN);
-
- /* Turn off the windows */
- iowrite32(0x00800000, bridge->base + LSI0_CTL);
- iowrite32(0x00800000, bridge->base + LSI1_CTL);
- iowrite32(0x00800000, bridge->base + LSI2_CTL);
- iowrite32(0x00800000, bridge->base + LSI3_CTL);
- iowrite32(0x00800000, bridge->base + LSI4_CTL);
- iowrite32(0x00800000, bridge->base + LSI5_CTL);
- iowrite32(0x00800000, bridge->base + LSI6_CTL);
- iowrite32(0x00800000, bridge->base + LSI7_CTL);
- iowrite32(0x00F00000, bridge->base + VSI0_CTL);
- iowrite32(0x00F00000, bridge->base + VSI1_CTL);
- iowrite32(0x00F00000, bridge->base + VSI2_CTL);
- iowrite32(0x00F00000, bridge->base + VSI3_CTL);
- iowrite32(0x00F00000, bridge->base + VSI4_CTL);
- iowrite32(0x00F00000, bridge->base + VSI5_CTL);
- iowrite32(0x00F00000, bridge->base + VSI6_CTL);
- iowrite32(0x00F00000, bridge->base + VSI7_CTL);
-
- vme_unregister_bridge(ca91cx42_bridge);
-
- ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
-
- /* resources are stored in link list */
- list_for_each(pos, &ca91cx42_bridge->lm_resources) {
- lm = list_entry(pos, struct vme_lm_resource, list);
- list_del(pos);
- kfree(lm);
- }
-
- /* resources are stored in link list */
- list_for_each(pos, &ca91cx42_bridge->dma_resources) {
- dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
- list_del(pos);
- kfree(dma_ctrlr);
- }
-
- /* resources are stored in link list */
- list_for_each(pos, &ca91cx42_bridge->slave_resources) {
- slave_image = list_entry(pos, struct vme_slave_resource, list);
- list_del(pos);
- kfree(slave_image);
- }
-
- /* resources are stored in link list */
- list_for_each(pos, &ca91cx42_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
- list_del(pos);
- kfree(master_image);
- }
-
- ca91cx42_irq_exit(bridge, pdev);
-
- iounmap(bridge->base);
-
- pci_release_regions(pdev);
-
- pci_disable_device(pdev);
-
- kfree(ca91cx42_bridge);
-}
-
-static void __exit ca91cx42_exit(void)
-{
- pci_unregister_driver(&ca91cx42_driver);
-}
-
-MODULE_PARM_DESC(geoid, "Override geographical addressing");
-module_param(geoid, int, 0);
-
-MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
-MODULE_LICENSE("GPL");
-
-module_init(ca91cx42_init);
-module_exit(ca91cx42_exit);
+++ /dev/null
-/*
- * ca91c042.h
- *
- * Support for the Tundra Universe 1 and Universe II VME bridge chips
- *
- * Author: Tom Armistead
- * Updated by Ajit Prem
- * Copyright 2004 Motorola Inc.
- *
- * Further updated by Martyn Welch <martyn.welch@ge.com>
- * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * Derived from ca91c042.h by Michael Wyrick
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _CA91CX42_H
-#define _CA91CX42_H
-
-#ifndef PCI_VENDOR_ID_TUNDRA
-#define PCI_VENDOR_ID_TUNDRA 0x10e3
-#endif
-
-#ifndef PCI_DEVICE_ID_TUNDRA_CA91C142
-#define PCI_DEVICE_ID_TUNDRA_CA91C142 0x0000
-#endif
-
-/*
- * Define the maximum number of each resource type that the CA91C142 supports.
- */
-#define CA91C142_MAX_MASTER 8 /* Max Master Windows */
-#define CA91C142_MAX_SLAVE 8 /* Max Slave Windows */
-#define CA91C142_MAX_DMA 1 /* Max DMA Controllers */
-#define CA91C142_MAX_MAILBOX 4 /* Max Mail Box registers */
-
-/* Structure used to hold driver specific information */
-struct ca91cx42_driver {
- void __iomem *base; /* Base Address of device registers */
- wait_queue_head_t dma_queue;
- wait_queue_head_t iack_queue;
- wait_queue_head_t mbox_queue;
- void (*lm_callback[4])(int); /* Called in interrupt handler */
- void *crcsr_kernel;
- dma_addr_t crcsr_bus;
- struct mutex vme_rmw; /* Only one RMW cycle at a time */
- struct mutex vme_int; /*
- * Only one VME interrupt can be
- * generated at a time, provide locking
- */
-};
-
-/* See Page 2-77 in the Universe User Manual */
-struct ca91cx42_dma_descriptor {
- unsigned int dctl; /* DMA Control */
- unsigned int dtbc; /* Transfer Byte Count */
- unsigned int dla; /* PCI Address */
- unsigned int res1; /* Reserved */
- unsigned int dva; /* Vme Address */
- unsigned int res2; /* Reserved */
- unsigned int dcpp; /* Pointer to next Cmd Packet */
- unsigned int res3; /* Reserved */
-};
-
-struct ca91cx42_dma_entry {
- struct ca91cx42_dma_descriptor descriptor;
- struct list_head list;
-};
-
-/* Universe Register Offsets */
-/* general PCI configuration registers */
-#define CA91CX42_PCI_ID 0x000
-#define CA91CX42_PCI_CSR 0x004
-#define CA91CX42_PCI_CLASS 0x008
-#define CA91CX42_PCI_MISC0 0x00C
-#define CA91CX42_PCI_BS 0x010
-#define CA91CX42_PCI_MISC1 0x03C
-
-#define LSI0_CTL 0x0100
-#define LSI0_BS 0x0104
-#define LSI0_BD 0x0108
-#define LSI0_TO 0x010C
-
-#define LSI1_CTL 0x0114
-#define LSI1_BS 0x0118
-#define LSI1_BD 0x011C
-#define LSI1_TO 0x0120
-
-#define LSI2_CTL 0x0128
-#define LSI2_BS 0x012C
-#define LSI2_BD 0x0130
-#define LSI2_TO 0x0134
-
-#define LSI3_CTL 0x013C
-#define LSI3_BS 0x0140
-#define LSI3_BD 0x0144
-#define LSI3_TO 0x0148
-
-#define LSI4_CTL 0x01A0
-#define LSI4_BS 0x01A4
-#define LSI4_BD 0x01A8
-#define LSI4_TO 0x01AC
-
-#define LSI5_CTL 0x01B4
-#define LSI5_BS 0x01B8
-#define LSI5_BD 0x01BC
-#define LSI5_TO 0x01C0
-
-#define LSI6_CTL 0x01C8
-#define LSI6_BS 0x01CC
-#define LSI6_BD 0x01D0
-#define LSI6_TO 0x01D4
-
-#define LSI7_CTL 0x01DC
-#define LSI7_BS 0x01E0
-#define LSI7_BD 0x01E4
-#define LSI7_TO 0x01E8
-
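- /* Register offsets for the eight LSI (outbound / VME master) images,
- * indexed by window number */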
-static const int CA91CX42_LSI_CTL[] = { LSI0_CTL, LSI1_CTL, LSI2_CTL, LSI3_CTL,
- LSI4_CTL, LSI5_CTL, LSI6_CTL, LSI7_CTL };
-
-static const int CA91CX42_LSI_BS[] = { LSI0_BS, LSI1_BS, LSI2_BS, LSI3_BS,
- LSI4_BS, LSI5_BS, LSI6_BS, LSI7_BS };
-
-static const int CA91CX42_LSI_BD[] = { LSI0_BD, LSI1_BD, LSI2_BD, LSI3_BD,
- LSI4_BD, LSI5_BD, LSI6_BD, LSI7_BD };
-
-static const int CA91CX42_LSI_TO[] = { LSI0_TO, LSI1_TO, LSI2_TO, LSI3_TO,
- LSI4_TO, LSI5_TO, LSI6_TO, LSI7_TO };
-
-#define SCYC_CTL 0x0170
-#define SCYC_ADDR 0x0174
-#define SCYC_EN 0x0178
-#define SCYC_CMP 0x017C
-#define SCYC_SWP 0x0180
-#define LMISC 0x0184
-#define SLSI 0x0188
-#define L_CMDERR 0x018C
-#define LAERR 0x0190
-
-#define DCTL 0x0200
-#define DTBC 0x0204
-#define DLA 0x0208
-#define DVA 0x0210
-#define DCPP 0x0218
-#define DGCS 0x0220
-#define D_LLUE 0x0224
-
-#define LINT_EN 0x0300
-#define LINT_STAT 0x0304
-#define LINT_MAP0 0x0308
-#define LINT_MAP1 0x030C
-#define VINT_EN 0x0310
-#define VINT_STAT 0x0314
-#define VINT_MAP0 0x0318
-#define VINT_MAP1 0x031C
-#define STATID 0x0320
-
-#define V1_STATID 0x0324
-#define V2_STATID 0x0328
-#define V3_STATID 0x032C
-#define V4_STATID 0x0330
-#define V5_STATID 0x0334
-#define V6_STATID 0x0338
-#define V7_STATID 0x033C
-
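- /* Status/ID capture registers, indexed by VME interrupt level
- * (there is no level 0, so index 0 is unused) */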
-static const int CA91CX42_V_STATID[8] = { 0, V1_STATID, V2_STATID, V3_STATID,
- V4_STATID, V5_STATID, V6_STATID,
- V7_STATID };
-
-#define LINT_MAP2 0x0340
-#define VINT_MAP2 0x0344
-
-#define MBOX0 0x0348
-#define MBOX1 0x034C
-#define MBOX2 0x0350
-#define MBOX3 0x0354
-#define SEMA0 0x0358
-#define SEMA1 0x035C
-
-#define MAST_CTL 0x0400
-#define MISC_CTL 0x0404
-#define MISC_STAT 0x0408
-#define USER_AM 0x040C
-
-#define VSI0_CTL 0x0F00
-#define VSI0_BS 0x0F04
-#define VSI0_BD 0x0F08
-#define VSI0_TO 0x0F0C
-
-#define VSI1_CTL 0x0F14
-#define VSI1_BS 0x0F18
-#define VSI1_BD 0x0F1C
-#define VSI1_TO 0x0F20
-
-#define VSI2_CTL 0x0F28
-#define VSI2_BS 0x0F2C
-#define VSI2_BD 0x0F30
-#define VSI2_TO 0x0F34
-
-#define VSI3_CTL 0x0F3C
-#define VSI3_BS 0x0F40
-#define VSI3_BD 0x0F44
-#define VSI3_TO 0x0F48
-
-#define LM_CTL 0x0F64
-#define LM_BS 0x0F68
-
-#define VRAI_CTL 0x0F70
-
-#define VRAI_BS 0x0F74
-#define VCSR_CTL 0x0F80
-#define VCSR_TO 0x0F84
-#define V_AMERR 0x0F88
-#define VAERR 0x0F8C
-
-#define VSI4_CTL 0x0F90
-#define VSI4_BS 0x0F94
-#define VSI4_BD 0x0F98
-#define VSI4_TO 0x0F9C
-
-#define VSI5_CTL 0x0FA4
-#define VSI5_BS 0x0FA8
-#define VSI5_BD 0x0FAC
-#define VSI5_TO 0x0FB0
-
-#define VSI6_CTL 0x0FB8
-#define VSI6_BS 0x0FBC
-#define VSI6_BD 0x0FC0
-#define VSI6_TO 0x0FC4
-
-#define VSI7_CTL 0x0FCC
-#define VSI7_BS 0x0FD0
-#define VSI7_BD 0x0FD4
-#define VSI7_TO 0x0FD8
-
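- /* Register offsets for the eight VSI (inbound / VME slave) images,
- * indexed by window number */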
-static const int CA91CX42_VSI_CTL[] = { VSI0_CTL, VSI1_CTL, VSI2_CTL, VSI3_CTL,
- VSI4_CTL, VSI5_CTL, VSI6_CTL, VSI7_CTL };
-
-static const int CA91CX42_VSI_BS[] = { VSI0_BS, VSI1_BS, VSI2_BS, VSI3_BS,
- VSI4_BS, VSI5_BS, VSI6_BS, VSI7_BS };
-
-static const int CA91CX42_VSI_BD[] = { VSI0_BD, VSI1_BD, VSI2_BD, VSI3_BD,
- VSI4_BD, VSI5_BD, VSI6_BD, VSI7_BD };
-
-static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
- VSI4_TO, VSI5_TO, VSI6_TO, VSI7_TO };
-
-#define VCSR_CLR 0x0FF4
-#define VCSR_SET 0x0FF8
-#define VCSR_BS 0x0FFC
-
-/*
- * PCI Class Register
- * offset 008
- */
-#define CA91CX42_BM_PCI_CLASS_BASE 0xFF000000
-#define CA91CX42_OF_PCI_CLASS_BASE 24
-#define CA91CX42_BM_PCI_CLASS_SUB 0x00FF0000
-#define CA91CX42_OF_PCI_CLASS_SUB 16
-#define CA91CX42_BM_PCI_CLASS_PROG 0x0000FF00
-#define CA91CX42_OF_PCI_CLASS_PROG 8
-#define CA91CX42_BM_PCI_CLASS_RID 0x000000FF
-#define CA91CX42_OF_PCI_CLASS_RID 0
-
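- /* PCI revision ID values identifying Universe I and Universe II parts */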
-#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_I 0
-#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_II 1
-
-/*
- * PCI Misc Register
- * offset 00C
- */
-#define CA91CX42_BM_PCI_MISC0_BISTC 0x80000000
-#define CA91CX42_BM_PCI_MISC0_SBIST 0x60000000
-#define CA91CX42_BM_PCI_MISC0_CCODE 0x0F000000
-#define CA91CX42_BM_PCI_MISC0_MFUNCT 0x00800000
-#define CA91CX42_BM_PCI_MISC0_LAYOUT 0x007F0000
-#define CA91CX42_BM_PCI_MISC0_LTIMER 0x0000FF00
-#define CA91CX42_OF_PCI_MISC0_LTIMER 8
-
-
-/*
- * LSI Control Register
- * offset 100
- */
-#define CA91CX42_LSI_CTL_EN (1<<31)
-#define CA91CX42_LSI_CTL_PWEN (1<<30)
-
-#define CA91CX42_LSI_CTL_VDW_M (3<<22)
-#define CA91CX42_LSI_CTL_VDW_D8 0
-#define CA91CX42_LSI_CTL_VDW_D16 (1<<22)
-#define CA91CX42_LSI_CTL_VDW_D32 (1<<23)
-#define CA91CX42_LSI_CTL_VDW_D64 (3<<22)
-
-#define CA91CX42_LSI_CTL_VAS_M (7<<16)
-#define CA91CX42_LSI_CTL_VAS_A16 0
-#define CA91CX42_LSI_CTL_VAS_A24 (1<<16)
-#define CA91CX42_LSI_CTL_VAS_A32 (1<<17)
-#define CA91CX42_LSI_CTL_VAS_CRCSR (5<<16)
-#define CA91CX42_LSI_CTL_VAS_USER1 (3<<17)
-#define CA91CX42_LSI_CTL_VAS_USER2 (7<<16)
-
-#define CA91CX42_LSI_CTL_PGM_M (1<<14)
-#define CA91CX42_LSI_CTL_PGM_DATA 0
-#define CA91CX42_LSI_CTL_PGM_PGM (1<<14)
-
-#define CA91CX42_LSI_CTL_SUPER_M (1<<12)
-#define CA91CX42_LSI_CTL_SUPER_NPRIV 0
-#define CA91CX42_LSI_CTL_SUPER_SUPR (1<<12)
-
-#define CA91CX42_LSI_CTL_VCT_M (1<<8)
-#define CA91CX42_LSI_CTL_VCT_BLT (1<<8)
-#define CA91CX42_LSI_CTL_VCT_MBLT (1<<8)
-#define CA91CX42_LSI_CTL_LAS (1<<0)
-
-/*
- * SCYC_CTL Register
- * offset 178
- */
-#define CA91CX42_SCYC_CTL_LAS_PCIMEM 0
-#define CA91CX42_SCYC_CTL_LAS_PCIIO (1<<2)
-
-#define CA91CX42_SCYC_CTL_CYC_M (3<<0)
-#define CA91CX42_SCYC_CTL_CYC_RMW (1<<0)
-#define CA91CX42_SCYC_CTL_CYC_ADOH (1<<1)
-
-/*
- * LMISC Register
- * offset 184
- */
-#define CA91CX42_BM_LMISC_CRT 0xF0000000
-#define CA91CX42_OF_LMISC_CRT 28
-#define CA91CX42_BM_LMISC_CWT 0x0F000000
-#define CA91CX42_OF_LMISC_CWT 24
-
-/*
- * SLSI Register
- * offset 188
- */
-#define CA91CX42_BM_SLSI_EN 0x80000000
-#define CA91CX42_BM_SLSI_PWEN 0x40000000
-#define CA91CX42_BM_SLSI_VDW 0x00F00000
-#define CA91CX42_OF_SLSI_VDW 20
-#define CA91CX42_BM_SLSI_PGM 0x0000F000
-#define CA91CX42_OF_SLSI_PGM 12
-#define CA91CX42_BM_SLSI_SUPER 0x00000F00
-#define CA91CX42_OF_SLSI_SUPER 8
-#define CA91CX42_BM_SLSI_BS 0x000000F6
-#define CA91CX42_OF_SLSI_BS 2
-#define CA91CX42_BM_SLSI_LAS 0x00000003
-#define CA91CX42_OF_SLSI_LAS 0
-#define CA91CX42_BM_SLSI_RESERVED 0x3F0F0000
-
-/*
- * DCTL Register
- * offset 200
- */
-#define CA91CX42_DCTL_L2V (1<<31)
-#define CA91CX42_DCTL_VDW_M (3<<22)
-#define CA91CX42_DCTL_VDW_D8 0
-#define CA91CX42_DCTL_VDW_D16 (1<<22)
-#define CA91CX42_DCTL_VDW_D32 (1<<23)
-#define CA91CX42_DCTL_VDW_D64 (3<<22)
-
-#define CA91CX42_DCTL_VAS_M (7<<16)
-#define CA91CX42_DCTL_VAS_A16 0
-#define CA91CX42_DCTL_VAS_A24 (1<<16)
-#define CA91CX42_DCTL_VAS_A32 (1<<17)
-#define CA91CX42_DCTL_VAS_USER1 (3<<17)
-#define CA91CX42_DCTL_VAS_USER2 (7<<16)
-
-#define CA91CX42_DCTL_PGM_M (1<<14)
-#define CA91CX42_DCTL_PGM_DATA 0
-#define CA91CX42_DCTL_PGM_PGM (1<<14)
-
-#define CA91CX42_DCTL_SUPER_M (1<<12)
-#define CA91CX42_DCTL_SUPER_NPRIV 0
-#define CA91CX42_DCTL_SUPER_SUPR (1<<12)
-
-#define CA91CX42_DCTL_VCT_M (1<<8)
-#define CA91CX42_DCTL_VCT_BLT (1<<8)
-#define CA91CX42_DCTL_LD64EN (1<<7)
-
-/*
- * DCPP Register
- * offset 218
- */
-#define CA91CX42_DCPP_M 0xf
-#define CA91CX42_DCPP_NULL (1<<0)
-
-/*
- * DMA General Control/Status Register (DGCS)
- * offset 220
- */
-#define CA91CX42_DGCS_GO (1<<31)
-#define CA91CX42_DGCS_STOP_REQ (1<<30)
-#define CA91CX42_DGCS_HALT_REQ (1<<29)
-#define CA91CX42_DGCS_CHAIN (1<<27)
-
-#define CA91CX42_DGCS_VON_M (7<<20)
-
-#define CA91CX42_DGCS_VOFF_M (0xf<<16)
-
-#define CA91CX42_DGCS_ACT (1<<15)
-#define CA91CX42_DGCS_STOP (1<<14)
-#define CA91CX42_DGCS_HALT (1<<13)
-#define CA91CX42_DGCS_DONE (1<<11)
-#define CA91CX42_DGCS_LERR (1<<10)
-#define CA91CX42_DGCS_VERR (1<<9)
-#define CA91CX42_DGCS_PERR (1<<8)
-#define CA91CX42_DGCS_INT_STOP (1<<6)
-#define CA91CX42_DGCS_INT_HALT (1<<5)
-#define CA91CX42_DGCS_INT_DONE (1<<3)
-#define CA91CX42_DGCS_INT_LERR (1<<2)
-#define CA91CX42_DGCS_INT_VERR (1<<1)
-#define CA91CX42_DGCS_INT_PERR (1<<0)
-
-/*
- * PCI Interrupt Enable Register
- * offset 300
- */
-#define CA91CX42_LINT_LM3 0x00800000
-#define CA91CX42_LINT_LM2 0x00400000
-#define CA91CX42_LINT_LM1 0x00200000
-#define CA91CX42_LINT_LM0 0x00100000
-#define CA91CX42_LINT_MBOX3 0x00080000
-#define CA91CX42_LINT_MBOX2 0x00040000
-#define CA91CX42_LINT_MBOX1 0x00020000
-#define CA91CX42_LINT_MBOX0 0x00010000
-#define CA91CX42_LINT_ACFAIL 0x00008000
-#define CA91CX42_LINT_SYSFAIL 0x00004000
-#define CA91CX42_LINT_SW_INT 0x00002000
-#define CA91CX42_LINT_SW_IACK 0x00001000
-
-#define CA91CX42_LINT_VERR 0x00000400
-#define CA91CX42_LINT_LERR 0x00000200
-#define CA91CX42_LINT_DMA 0x00000100
-#define CA91CX42_LINT_VIRQ7 0x00000080
-#define CA91CX42_LINT_VIRQ6 0x00000040
-#define CA91CX42_LINT_VIRQ5 0x00000020
-#define CA91CX42_LINT_VIRQ4 0x00000010
-#define CA91CX42_LINT_VIRQ3 0x00000008
-#define CA91CX42_LINT_VIRQ2 0x00000004
-#define CA91CX42_LINT_VIRQ1 0x00000002
-#define CA91CX42_LINT_VOWN 0x00000001
-
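- /* LINT enable/status bits for VME IRQ levels, indexed by level
- * (there is no level 0) */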
-static const int CA91CX42_LINT_VIRQ[] = { 0, CA91CX42_LINT_VIRQ1,
- CA91CX42_LINT_VIRQ2, CA91CX42_LINT_VIRQ3,
- CA91CX42_LINT_VIRQ4, CA91CX42_LINT_VIRQ5,
- CA91CX42_LINT_VIRQ6, CA91CX42_LINT_VIRQ7 };
-
-#define CA91CX42_LINT_MBOX 0x000F0000
-
-static const int CA91CX42_LINT_LM[] = { CA91CX42_LINT_LM0, CA91CX42_LINT_LM1,
- CA91CX42_LINT_LM2, CA91CX42_LINT_LM3 };
-
-/*
- * MAST_CTL Register
- * offset 400
- */
-#define CA91CX42_BM_MAST_CTL_MAXRTRY 0xF0000000
-#define CA91CX42_OF_MAST_CTL_MAXRTRY 28
-#define CA91CX42_BM_MAST_CTL_PWON 0x0F000000
-#define CA91CX42_OF_MAST_CTL_PWON 24
-#define CA91CX42_BM_MAST_CTL_VRL 0x00C00000
-#define CA91CX42_OF_MAST_CTL_VRL 22
-#define CA91CX42_BM_MAST_CTL_VRM 0x00200000
-#define CA91CX42_BM_MAST_CTL_VREL 0x00100000
-#define CA91CX42_BM_MAST_CTL_VOWN 0x00080000
-#define CA91CX42_BM_MAST_CTL_VOWN_ACK 0x00040000
-#define CA91CX42_BM_MAST_CTL_PABS 0x00001000
-#define CA91CX42_BM_MAST_CTL_BUS_NO 0x0000000F
-#define CA91CX42_OF_MAST_CTL_BUS_NO 0
-
-/*
- * MISC_CTL Register
- * offset 404
- */
-#define CA91CX42_MISC_CTL_VBTO 0xF0000000
-#define CA91CX42_MISC_CTL_VARB 0x04000000
-#define CA91CX42_MISC_CTL_VARBTO 0x03000000
-#define CA91CX42_MISC_CTL_SW_LRST 0x00800000
-#define CA91CX42_MISC_CTL_SW_SRST 0x00400000
-#define CA91CX42_MISC_CTL_BI 0x00100000
-#define CA91CX42_MISC_CTL_ENGBI 0x00080000
-#define CA91CX42_MISC_CTL_RESCIND 0x00040000
-#define CA91CX42_MISC_CTL_SYSCON 0x00020000
-#define CA91CX42_MISC_CTL_V64AUTO 0x00010000
-#define CA91CX42_MISC_CTL_RESERVED 0x0820FFFF
-
-#define CA91CX42_OF_MISC_CTL_VARBTO 24
-#define CA91CX42_OF_MISC_CTL_VBTO 28
-
-/*
- * MISC_STAT Register
- * offset 408
- */
-#define CA91CX42_BM_MISC_STAT_ENDIAN 0x80000000
-#define CA91CX42_BM_MISC_STAT_LCLSIZE 0x40000000
-#define CA91CX42_BM_MISC_STAT_DY4AUTO 0x08000000
-#define CA91CX42_BM_MISC_STAT_MYBBSY 0x00200000
-#define CA91CX42_BM_MISC_STAT_DY4DONE 0x00080000
-#define CA91CX42_BM_MISC_STAT_TXFE 0x00040000
-#define CA91CX42_BM_MISC_STAT_RXFE 0x00020000
-#define CA91CX42_BM_MISC_STAT_DY4AUTOID 0x0000FF00
-#define CA91CX42_OF_MISC_STAT_DY4AUTOID 8
-
-/*
- * VSI Control Register
- * offset F00
- */
-#define CA91CX42_VSI_CTL_EN (1<<31)
-#define CA91CX42_VSI_CTL_PWEN (1<<30)
-#define CA91CX42_VSI_CTL_PREN (1<<29)
-
-#define CA91CX42_VSI_CTL_PGM_M (3<<22)
-#define CA91CX42_VSI_CTL_PGM_DATA (1<<22)
-#define CA91CX42_VSI_CTL_PGM_PGM (1<<23)
-
-#define CA91CX42_VSI_CTL_SUPER_M (3<<20)
-#define CA91CX42_VSI_CTL_SUPER_NPRIV (1<<20)
-#define CA91CX42_VSI_CTL_SUPER_SUPR (1<<21)
-
-#define CA91CX42_VSI_CTL_VAS_M (7<<16)
-#define CA91CX42_VSI_CTL_VAS_A16 0
-#define CA91CX42_VSI_CTL_VAS_A24 (1<<16)
-#define CA91CX42_VSI_CTL_VAS_A32 (1<<17)
-#define CA91CX42_VSI_CTL_VAS_USER1 (3<<17)
-#define CA91CX42_VSI_CTL_VAS_USER2 (7<<16)
-
-#define CA91CX42_VSI_CTL_LD64EN (1<<7)
-#define CA91CX42_VSI_CTL_LLRMW (1<<6)
-
-#define CA91CX42_VSI_CTL_LAS_M (3<<0)
-#define CA91CX42_VSI_CTL_LAS_PCI_MS 0
-#define CA91CX42_VSI_CTL_LAS_PCI_IO (1<<0)
-#define CA91CX42_VSI_CTL_LAS_PCI_CONF (1<<1)
-
-/* LM_CTL Register
- * offset F64
- */
-#define CA91CX42_LM_CTL_EN (1<<31)
-#define CA91CX42_LM_CTL_PGM (1<<23)
-#define CA91CX42_LM_CTL_DATA (1<<22)
-#define CA91CX42_LM_CTL_SUPR (1<<21)
-#define CA91CX42_LM_CTL_NPRIV (1<<20)
-#define CA91CX42_LM_CTL_AS_M (5<<16)
-#define CA91CX42_LM_CTL_AS_A16 0
-#define CA91CX42_LM_CTL_AS_A24 (1<<16)
-#define CA91CX42_LM_CTL_AS_A32 (1<<17)
-
-/*
- * VRAI_CTL Register
- * offset F70
- */
-#define CA91CX42_BM_VRAI_CTL_EN 0x80000000
-#define CA91CX42_BM_VRAI_CTL_PGM 0x00C00000
-#define CA91CX42_OF_VRAI_CTL_PGM 22
-#define CA91CX42_BM_VRAI_CTL_SUPER 0x00300000
-#define CA91CX42_OF_VRAI_CTL_SUPER 20
-#define CA91CX42_BM_VRAI_CTL_VAS 0x00030000
-#define CA91CX42_OF_VRAI_CTL_VAS 16
-
-/* VCSR_CTL Register
- * offset F80
- */
-#define CA91CX42_VCSR_CTL_EN (1<<31)
-
-#define CA91CX42_VCSR_CTL_LAS_M (3<<0)
-#define CA91CX42_VCSR_CTL_LAS_PCI_MS 0
-#define CA91CX42_VCSR_CTL_LAS_PCI_IO (1<<0)
-#define CA91CX42_VCSR_CTL_LAS_PCI_CONF (1<<1)
-
-/* VCSR_BS Register
- * offset FFC
- */
-#define CA91CX42_VCSR_BS_SLOT_M (0x1F<<27)
-
-#endif /* _CA91CX42_H */
+++ /dev/null
-/*
- * Support for the Tundra TSI148 VME-PCI Bridge Chip
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * Based on work by Tom Armistead and Ajit Prem
- * Copyright 2004 Motorola Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/byteorder/generic.h>
-
-#include "../vme.h"
-#include "../vme_bridge.h"
-#include "vme_tsi148.h"
-
-static int __init tsi148_init(void);
-static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
-static void tsi148_remove(struct pci_dev *);
-static void __exit tsi148_exit(void);
-
-
-/* Module parameter */
-static bool err_chk;
-static int geoid;
-
-static const char driver_name[] = "vme_tsi148";
-
-static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
- { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
- { },
-};
-
-static struct pci_driver tsi148_driver = {
- .name = driver_name,
- .id_table = tsi148_ids,
- .probe = tsi148_probe,
- .remove = tsi148_remove,
-};
-
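- /* Combine two 32-bit register values (high, low) into one 64-bit value */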
-static void reg_join(unsigned int high, unsigned int low,
- unsigned long long *variable)
-{
- *variable = (unsigned long long)high << 32;
- *variable |= (unsigned long long)low;
-}
-
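- /* Split a 64-bit value into its high and low 32-bit register halves */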
-static void reg_split(unsigned long long variable, unsigned int *high,
- unsigned int *low)
-{
- *low = (unsigned int)variable & 0xFFFFFFFF;
- *high = (unsigned int)(variable >> 32);
-}
-
-/*
- * Wakes up DMA queue.
- */
-static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
- int channel_mask)
-{
- u32 serviced = 0;
-
- if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
- wake_up(&bridge->dma_queue[0]);
- serviced |= TSI148_LCSR_INTC_DMA0C;
- }
- if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
- wake_up(&bridge->dma_queue[1]);
- serviced |= TSI148_LCSR_INTC_DMA1C;
- }
-
- return serviced;
-}
-
-/*
- * Call the location monitor callbacks for any triggered monitors
- */
-static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
-{
- int i;
- u32 serviced = 0;
-
- for (i = 0; i < 4; i++) {
- if (stat & TSI148_LCSR_INTS_LMS[i]) {
- /* We only enable interrupts if the callback is set */
- bridge->lm_callback[i](i);
- serviced |= TSI148_LCSR_INTC_LMC[i];
- }
- }
-
- return serviced;
-}
-
-/*
- * Report VME mailbox interrupts.
- *
- * XXX This functionality is not exposed up through the API.
- */
-static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
-{
- int i;
- u32 val;
- u32 serviced = 0;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- for (i = 0; i < 4; i++) {
- if (stat & TSI148_LCSR_INTS_MBS[i]) {
- val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
- dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
- ": 0x%x\n", i, val);
- serviced |= TSI148_LCSR_INTC_MBC[i];
- }
- }
-
- return serviced;
-}
-
-/*
- * Display error & status message when PERR (PCI) exception interrupt occurs.
- */
-static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
-{
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
- "attributes: %08x\n",
- ioread32be(bridge->base + TSI148_LCSR_EDPAU),
- ioread32be(bridge->base + TSI148_LCSR_EDPAL),
- ioread32be(bridge->base + TSI148_LCSR_EDPAT));
-
- dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
- "completion reg: %08x\n",
- ioread32be(bridge->base + TSI148_LCSR_EDPXA),
- ioread32be(bridge->base + TSI148_LCSR_EDPXS));
-
- iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
-
- return TSI148_LCSR_INTC_PERRC;
-}
-
-/*
- * Save address and status when VME error interrupt occurs.
- */
-static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
-{
- unsigned int error_addr_high, error_addr_low;
- unsigned long long error_addr;
- u32 error_attrib;
- struct vme_bus_error *error;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
- error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
- error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
-
- reg_join(error_addr_high, error_addr_low, &error_addr);
-
- /* Check for exception register overflow (we have lost error data) */
- if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
- dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
- "Occurred\n");
- }
-
- error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
- if (error) {
- error->address = error_addr;
- error->attributes = error_attrib;
- list_add_tail(&error->list, &tsi148_bridge->vme_errors);
- } else {
- dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
- "VMEbus Error reporting\n");
- dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
- "0x%llx, attributes: %08x\n", error_addr, error_attrib);
- }
-
- /* Clear Status */
- iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
-
- return TSI148_LCSR_INTC_VERRC;
-}
-
-/*
- * Wake up IACK queue.
- */
-static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
-{
- wake_up(&bridge->iack_queue);
-
- return TSI148_LCSR_INTC_IACKC;
-}
-
-/*
- * Call the VME bus interrupt callback if provided.
- */
-static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
- u32 stat)
-{
- int vec, i, serviced = 0;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- for (i = 7; i > 0; i--) {
- if (stat & (1 << i)) {
- /*
- * Note: Even though the registers are defined as
- * 32-bits in the spec, we only want to issue 8-bit
- * IACK cycles on the bus, read from offset 3.
- */
- vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
-
- vme_irq_handler(tsi148_bridge, i, vec);
-
- serviced |= (1 << i);
- }
- }
-
- return serviced;
-}
-
-/*
- * Top level interrupt handler. Clears appropriate interrupt status bits and
- * then calls appropriate sub handler(s).
- */
-static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
-{
- u32 stat, enable, serviced = 0;
- struct vme_bridge *tsi148_bridge;
- struct tsi148_driver *bridge;
-
- tsi148_bridge = ptr;
-
- bridge = tsi148_bridge->driver_priv;
-
- /* Determine which interrupts are unmasked and set */
- enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
- stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
-
- /* Only look at unmasked interrupts */
- stat &= enable;
-
- if (unlikely(!stat))
- return IRQ_NONE;
-
- /* Call subhandlers as appropriate */
- /* DMA irqs */
- if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
- serviced |= tsi148_DMA_irqhandler(bridge, stat);
-
- /* Location monitor irqs */
- if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
- TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
- serviced |= tsi148_LM_irqhandler(bridge, stat);
-
- /* Mail box irqs */
- if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
- TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
- serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
-
- /* PCI bus error */
- if (stat & TSI148_LCSR_INTS_PERRS)
- serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
-
- /* VME bus error */
- if (stat & TSI148_LCSR_INTS_VERRS)
- serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
-
- /* IACK irq */
- if (stat & TSI148_LCSR_INTS_IACKS)
- serviced |= tsi148_IACK_irqhandler(bridge);
-
- /* VME bus irqs */
- if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
- TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
- TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
- TSI148_LCSR_INTS_IRQ1S))
- serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
-
- /* Clear serviced interrupts */
- iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
-
- return IRQ_HANDLED;
-}
-
-static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
-{
- int result;
- unsigned int tmp;
- struct pci_dev *pdev;
- struct tsi148_driver *bridge;
-
- pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
-
- bridge = tsi148_bridge->driver_priv;
-
- /* Initialise list for VME bus errors */
- INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
-
- mutex_init(&tsi148_bridge->irq_mtx);
-
- result = request_irq(pdev->irq,
- tsi148_irqhandler,
- IRQF_SHARED,
- driver_name, tsi148_bridge);
- if (result) {
- dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
- "vector %02X\n", pdev->irq);
- return result;
- }
-
- /* Enable and unmask interrupts */
- tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
- TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
- TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
- TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
- TSI148_LCSR_INTEO_IACKEO;
-
- /* This leaves the following interrupts masked.
- * TSI148_LCSR_INTEO_VIEEO
- * TSI148_LCSR_INTEO_SYSFLEO
- * TSI148_LCSR_INTEO_ACFLEO
- */
-
- /* Don't enable Location Monitor interrupts here - they will be
- * enabled when the location monitors are properly configured and
- * a callback has been attached.
- * TSI148_LCSR_INTEO_LM0EO
- * TSI148_LCSR_INTEO_LM1EO
- * TSI148_LCSR_INTEO_LM2EO
- * TSI148_LCSR_INTEO_LM3EO
- */
-
- /* Don't enable VME interrupts until a handler has been added, else the
- * board will respond to the interrupt and we don't want that unless a
- * handler knows how to deal with it properly.
- * TSI148_LCSR_INTEO_IRQ7EO
- * TSI148_LCSR_INTEO_IRQ6EO
- * TSI148_LCSR_INTEO_IRQ5EO
- * TSI148_LCSR_INTEO_IRQ4EO
- * TSI148_LCSR_INTEO_IRQ3EO
- * TSI148_LCSR_INTEO_IRQ2EO
- * TSI148_LCSR_INTEO_IRQ1EO
- */
-
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
-
- return 0;
-}
-
-static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
-{
- struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
-
- /* Turn off interrupts */
- iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
- iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
-
- /* Clear all interrupts */
- iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
-
- /* Detach interrupt handler */
- free_irq(pdev->irq, tsi148_bridge);
-}
-
-/*
- * Check to see if an IACK has been received, return true (1) or false (0).
- */
-static int tsi148_iack_received(struct tsi148_driver *bridge)
-{
- u32 tmp;
-
- tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
-
- if (tmp & TSI148_LCSR_VICR_IRQS)
- return 0;
- else
- return 1;
-}
-
-/*
- * Configure VME interrupt
- */
-static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
- int state, int sync)
-{
- struct pci_dev *pdev;
- u32 tmp;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- /* We need to do the ordering differently for enabling and disabling */
- if (state == 0) {
- tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
- tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
-
- tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
- tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
-
- if (sync != 0) {
- pdev = container_of(tsi148_bridge->parent,
- struct pci_dev, dev);
-
- synchronize_irq(pdev->irq);
- }
- } else {
- tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
- tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
-
- tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
- tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
- }
-}
-
-/*
- * Generate a VME bus interrupt at the requested level & vector. Wait for
- * interrupt to be acked.
- */
-static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
- int statid)
-{
- u32 tmp;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- mutex_lock(&bridge->vme_int);
-
- /* Read VICR register */
- tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
-
- /* Set Status/ID */
- tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
- (statid & TSI148_LCSR_VICR_STID_M);
- iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
-
- /* Assert VMEbus IRQ */
- tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
- iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
-
- /* XXX Consider implementing a timeout? */
- wait_event_interruptible(bridge->iack_queue,
- tsi148_iack_received(bridge));
-
- mutex_unlock(&bridge->vme_int);
-
- return 0;
-}
-
-/*
- * Find the first error in this address range
- */
-static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
- u32 aspace, unsigned long long address, size_t count)
-{
- struct list_head *err_pos;
- struct vme_bus_error *vme_err, *valid = NULL;
- unsigned long long bound;
-
- bound = address + count;
-
- /*
- * XXX We are currently not looking at the address space when parsing
- * for errors. This is because parsing the Address Modifier Codes
- * is going to be quite resource intensive to do properly. We
- * should be OK just looking at the addresses and this is certainly
- * much better than what we had before.
- */
- err_pos = NULL;
- /* Iterate through errors */
- list_for_each(err_pos, &tsi148_bridge->vme_errors) {
- vme_err = list_entry(err_pos, struct vme_bus_error, list);
- if ((vme_err->address >= address) &&
- (vme_err->address < bound)) {
-
- valid = vme_err;
- break;
- }
- }
-
- return valid;
-}
-
-/*
- * Clear errors in the provided address range.
- */
-static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
- u32 aspace, unsigned long long address, size_t count)
-{
- struct list_head *err_pos, *temp;
- struct vme_bus_error *vme_err;
- unsigned long long bound;
-
- bound = address + count;
-
- /*
- * XXX We are currently not looking at the address space when parsing
- * for errors. This is because parsing the Address Modifier Codes
- * is going to be quite resource intensive to do properly. We
- * should be OK just looking at the addresses and this is certainly
- * much better than what we had before.
- */
- err_pos = NULL;
- /* Iterate through errors */
- list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
- vme_err = list_entry(err_pos, struct vme_bus_error, list);
-
- if ((vme_err->address >= address) &&
- (vme_err->address < bound)) {
-
- list_del(err_pos);
- kfree(vme_err);
- }
- }
-}
-
-/*
- * Initialize a slave window with the requested attributes.
- */
-static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, u32 aspace, u32 cycle)
-{
- unsigned int i, addr = 0, granularity = 0;
- unsigned int temp_ctl = 0;
- unsigned int vme_base_low, vme_base_high;
- unsigned int vme_bound_low, vme_bound_high;
- unsigned int pci_offset_low, pci_offset_high;
- unsigned long long vme_bound, pci_offset;
- struct vme_bridge *tsi148_bridge;
- struct tsi148_driver *bridge;
-
- tsi148_bridge = image->parent;
- bridge = tsi148_bridge->driver_priv;
-
- i = image->number;
-
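- /* granularity is the minimum size/alignment for a window in the
- * selected address space; base, bound and PCI offset are checked
- * against it below */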
- switch (aspace) {
- case VME_A16:
- granularity = 0x10;
- addr |= TSI148_LCSR_ITAT_AS_A16;
- break;
- case VME_A24:
- granularity = 0x1000;
- addr |= TSI148_LCSR_ITAT_AS_A24;
- break;
- case VME_A32:
- granularity = 0x10000;
- addr |= TSI148_LCSR_ITAT_AS_A32;
- break;
- case VME_A64:
- granularity = 0x10000;
- addr |= TSI148_LCSR_ITAT_AS_A64;
- break;
- case VME_CRCSR:
- case VME_USER1:
- case VME_USER2:
- case VME_USER3:
- case VME_USER4:
- default:
- dev_err(tsi148_bridge->parent, "Invalid address space\n");
- return -EINVAL;
- }
-
- /* Convert 64-bit variables to 2x 32-bit variables */
- reg_split(vme_base, &vme_base_high, &vme_base_low);
-
- /*
- * Bound address is a valid address for the window, adjust
- * accordingly
- */
- vme_bound = vme_base + size - granularity;
- reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
- pci_offset = (unsigned long long)pci_base - vme_base;
- reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
-
- if (vme_base_low & (granularity - 1)) {
- dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
- return -EINVAL;
- }
- if (vme_bound_low & (granularity - 1)) {
- dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
- return -EINVAL;
- }
- if (pci_offset_low & (granularity - 1)) {
- dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
- "alignment\n");
- return -EINVAL;
- }
-
- /* Disable while we are mucking around */
- temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITAT);
- temp_ctl &= ~TSI148_LCSR_ITAT_EN;
- iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITAT);
-
- /* Setup mapping */
- iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITSAU);
- iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITSAL);
- iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITEAU);
- iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITEAL);
- iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITOFU);
- iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITOFL);
-
- /* Setup 2eSST speeds */
- temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
- switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
- case VME_2eSST160:
- temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
- break;
- case VME_2eSST267:
- temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
- break;
- case VME_2eSST320:
- temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
- break;
- }
-
- /* Setup cycle types */
- temp_ctl &= ~(0x1F << 7);
- if (cycle & VME_BLT)
- temp_ctl |= TSI148_LCSR_ITAT_BLT;
- if (cycle & VME_MBLT)
- temp_ctl |= TSI148_LCSR_ITAT_MBLT;
- if (cycle & VME_2eVME)
- temp_ctl |= TSI148_LCSR_ITAT_2eVME;
- if (cycle & VME_2eSST)
- temp_ctl |= TSI148_LCSR_ITAT_2eSST;
- if (cycle & VME_2eSSTB)
- temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
-
- /* Setup address space */
- temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
- temp_ctl |= addr;
-
- temp_ctl &= ~0xF;
- if (cycle & VME_SUPER)
- temp_ctl |= TSI148_LCSR_ITAT_SUPR;
- if (cycle & VME_USER)
- temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
- if (cycle & VME_PROG)
- temp_ctl |= TSI148_LCSR_ITAT_PGM;
- if (cycle & VME_DATA)
- temp_ctl |= TSI148_LCSR_ITAT_DATA;
-
- /* Write ctl reg without enable */
- iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITAT);
-
- if (enabled)
- temp_ctl |= TSI148_LCSR_ITAT_EN;
-
- iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITAT);
-
- return 0;
-}
-
-/*
- * Get slave window configuration.
- */
-static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
-{
- unsigned int i, granularity = 0, ctl = 0;
- unsigned int vme_base_low, vme_base_high;
- unsigned int vme_bound_low, vme_bound_high;
- unsigned int pci_offset_low, pci_offset_high;
- unsigned long long vme_bound, pci_offset;
- struct tsi148_driver *bridge;
-
- bridge = image->parent->driver_priv;
-
- i = image->number;
-
- /* Read registers */
- ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITAT);
-
- vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITSAU);
- vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITSAL);
- vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITEAU);
- vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITEAL);
- pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITOFU);
- pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITOFL);
-
- /* Combine the 2x 32-bit register values into 64-bit variables */
- reg_join(vme_base_high, vme_base_low, vme_base);
- reg_join(vme_bound_high, vme_bound_low, &vme_bound);
- reg_join(pci_offset_high, pci_offset_low, &pci_offset);
-
- *pci_base = (dma_addr_t)vme_base + pci_offset;
-
- *enabled = 0;
- *aspace = 0;
- *cycle = 0;
-
- if (ctl & TSI148_LCSR_ITAT_EN)
- *enabled = 1;
-
- if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
- granularity = 0x10;
- *aspace |= VME_A16;
- }
- if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
- granularity = 0x1000;
- *aspace |= VME_A24;
- }
- if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
- granularity = 0x10000;
- *aspace |= VME_A32;
- }
- if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
- granularity = 0x10000;
- *aspace |= VME_A64;
- }
-
- /* Need granularity before we set the size */
- *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
-
-
- if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
- *cycle |= VME_2eSST160;
- if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
- *cycle |= VME_2eSST267;
- if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
- *cycle |= VME_2eSST320;
-
- if (ctl & TSI148_LCSR_ITAT_BLT)
- *cycle |= VME_BLT;
- if (ctl & TSI148_LCSR_ITAT_MBLT)
- *cycle |= VME_MBLT;
- if (ctl & TSI148_LCSR_ITAT_2eVME)
- *cycle |= VME_2eVME;
- if (ctl & TSI148_LCSR_ITAT_2eSST)
- *cycle |= VME_2eSST;
- if (ctl & TSI148_LCSR_ITAT_2eSSTB)
- *cycle |= VME_2eSSTB;
-
- if (ctl & TSI148_LCSR_ITAT_SUPR)
- *cycle |= VME_SUPER;
- if (ctl & TSI148_LCSR_ITAT_NPRIV)
- *cycle |= VME_USER;
- if (ctl & TSI148_LCSR_ITAT_PGM)
- *cycle |= VME_PROG;
- if (ctl & TSI148_LCSR_ITAT_DATA)
- *cycle |= VME_DATA;
-
- return 0;
-}
-
-/*
- * Allocate and map PCI Resource
- */
-static int tsi148_alloc_resource(struct vme_master_resource *image,
- unsigned long long size)
-{
- unsigned long long existing_size;
- int retval = 0;
- struct pci_dev *pdev;
- struct vme_bridge *tsi148_bridge;
-
- tsi148_bridge = image->parent;
-
- pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
-
- existing_size = (unsigned long long)(image->bus_resource.end -
- image->bus_resource.start);
-
- /* If the existing size is OK, return */
- if ((size != 0) && (existing_size == (size - 1)))
- return 0;
-
- if (existing_size != 0) {
- iounmap(image->kern_base);
- image->kern_base = NULL;
- kfree(image->bus_resource.name);
- release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(struct resource));
- }
-
- /* Exit here if size is zero */
- if (size == 0)
- return 0;
-
- if (image->bus_resource.name == NULL) {
- image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (image->bus_resource.name == NULL) {
- dev_err(tsi148_bridge->parent, "Unable to allocate "
- "memory for resource name\n");
- retval = -ENOMEM;
- goto err_name;
- }
- }
-
- sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
- image->number);
-
- image->bus_resource.start = 0;
- image->bus_resource.end = (unsigned long)size;
- image->bus_resource.flags = IORESOURCE_MEM;
-
- retval = pci_bus_alloc_resource(pdev->bus,
- &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
- 0, NULL, NULL);
- if (retval) {
- dev_err(tsi148_bridge->parent, "Failed to allocate mem "
- "resource for window %d size 0x%lx start 0x%lx\n",
- image->number, (unsigned long)size,
- (unsigned long)image->bus_resource.start);
- goto err_resource;
- }
-
- image->kern_base = ioremap_nocache(
- image->bus_resource.start, size);
- if (image->kern_base == NULL) {
- dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
- retval = -ENOMEM;
- goto err_remap;
- }
-
- return 0;
-
-err_remap:
- release_resource(&image->bus_resource);
-err_resource:
- kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
-err_name:
- return retval;
-}
-
-/*
- * Free and unmap PCI Resource
- */
-static void tsi148_free_resource(struct vme_master_resource *image)
-{
- iounmap(image->kern_base);
- image->kern_base = NULL;
- release_resource(&image->bus_resource);
- kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
-}
-
-/*
- * Set the attributes of an outbound window.
- */
-static int tsi148_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size, u32 aspace,
- u32 cycle, u32 dwidth)
-{
- int retval = 0;
- unsigned int i;
- unsigned int temp_ctl = 0;
- unsigned int pci_base_low, pci_base_high;
- unsigned int pci_bound_low, pci_bound_high;
- unsigned int vme_offset_low, vme_offset_high;
- unsigned long long pci_bound, vme_offset, pci_base;
- struct vme_bridge *tsi148_bridge;
- struct tsi148_driver *bridge;
-
- tsi148_bridge = image->parent;
-
- bridge = tsi148_bridge->driver_priv;
-
- /* Verify input data */
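- /* Outbound windows have 64 kB (0x10000) granularity, hence the 0xFFFF
- * alignment checks here and below */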
- if (vme_base & 0xFFFF) {
- dev_err(tsi148_bridge->parent, "Invalid VME Window "
- "alignment\n");
- retval = -EINVAL;
- goto err_window;
- }
-
- if ((size == 0) && (enabled != 0)) {
- dev_err(tsi148_bridge->parent, "Size must be non-zero for "
- "enabled windows\n");
- retval = -EINVAL;
- goto err_window;
- }
-
- spin_lock(&image->lock);
-
- /* Let's allocate the resource here rather than further up the stack as
- * it avoids pushing loads of bus dependent stuff up the stack. If size
- * is zero, any existing resource will be freed.
- */
- retval = tsi148_alloc_resource(image, size);
- if (retval) {
- spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
- "resource\n");
- goto err_res;
- }
-
- if (size == 0) {
- pci_base = 0;
- pci_bound = 0;
- vme_offset = 0;
- } else {
- pci_base = (unsigned long long)image->bus_resource.start;
-
- /*
- * Bound address is a valid address for the window, adjust
- * according to window granularity.
- */
- pci_bound = pci_base + (size - 0x10000);
- vme_offset = vme_base - pci_base;
- }
-
- /* Convert 64-bit variables to 2x 32-bit variables */
- reg_split(pci_base, &pci_base_high, &pci_base_low);
- reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
- reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
-
- if (pci_base_low & 0xFFFF) {
- spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
- retval = -EINVAL;
- goto err_gran;
- }
- if (pci_bound_low & 0xFFFF) {
- spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
- retval = -EINVAL;
- goto err_gran;
- }
- if (vme_offset_low & 0xFFFF) {
- spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Invalid VME Offset "
- "alignment\n");
- retval = -EINVAL;
- goto err_gran;
- }
-
- i = image->number;
-
- /* Disable while we are mucking around */
- temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTAT);
- temp_ctl &= ~TSI148_LCSR_OTAT_EN;
- iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTAT);
-
- /* Setup 2eSST speeds */
- temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
- switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
- case VME_2eSST160:
- temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
- break;
- case VME_2eSST267:
- temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
- break;
- case VME_2eSST320:
- temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
- break;
- }
-
- /* Setup cycle types */
- if (cycle & VME_BLT) {
- temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
- temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
- }
- if (cycle & VME_MBLT) {
- temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
- temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
- }
- if (cycle & VME_2eVME) {
- temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
- temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
- }
- if (cycle & VME_2eSST) {
- temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
- temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
- }
- if (cycle & VME_2eSSTB) {
- dev_warn(tsi148_bridge->parent, "Currently not setting "
- "Broadcast Select Registers\n");
- temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
- temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
- }
-
- /* Setup data width */
- temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
- switch (dwidth) {
- case VME_D16:
- temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
- break;
- case VME_D32:
- temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
- break;
- default:
- spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Invalid data width\n");
- retval = -EINVAL;
- goto err_dwidth;
- }
-
- /* Setup address space */
- temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
- switch (aspace) {
- case VME_A16:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
- break;
- case VME_A24:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
- break;
- case VME_A32:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
- break;
- case VME_A64:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
- break;
- case VME_CRCSR:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
- break;
- case VME_USER1:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
- break;
- case VME_USER2:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
- break;
- case VME_USER3:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
- break;
- case VME_USER4:
- temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
- break;
- default:
- spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Invalid address space\n");
- retval = -EINVAL;
- goto err_aspace;
- }
-
- temp_ctl &= ~(3<<4);
- if (cycle & VME_SUPER)
- temp_ctl |= TSI148_LCSR_OTAT_SUP;
- if (cycle & VME_PROG)
- temp_ctl |= TSI148_LCSR_OTAT_PGM;
-
- /* Setup mapping */
- iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTSAU);
- iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTSAL);
- iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTEAU);
- iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTEAL);
- iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTOFU);
- iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTOFL);
-
- /* Write ctl reg without enable */
- iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTAT);
-
- if (enabled)
- temp_ctl |= TSI148_LCSR_OTAT_EN;
-
- iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTAT);
-
- spin_unlock(&image->lock);
- return 0;
-
-err_aspace:
-err_dwidth:
-err_gran:
- tsi148_free_resource(image);
-err_res:
-err_window:
- return retval;
-
-}
-
-/*
- * Get the attributes of an outbound window.
- *
- * XXX Not parsing prefetch information.
- */
-static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
-{
- unsigned int i, ctl;
- unsigned int pci_base_low, pci_base_high;
- unsigned int pci_bound_low, pci_bound_high;
- unsigned int vme_offset_low, vme_offset_high;
-
- unsigned long long pci_base, pci_bound, vme_offset;
- struct tsi148_driver *bridge;
-
- bridge = image->parent->driver_priv;
-
- i = image->number;
-
- ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTAT);
-
- pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTSAU);
- pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTSAL);
- pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTEAU);
- pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTEAL);
- vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTOFU);
- vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTOFL);
-
- /* Combine the 2x 32-bit register values into 64-bit variables */
- reg_join(pci_base_high, pci_base_low, &pci_base);
- reg_join(pci_bound_high, pci_bound_low, &pci_bound);
- reg_join(vme_offset_high, vme_offset_low, &vme_offset);
-
- *vme_base = pci_base + vme_offset;
- *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
-
- *enabled = 0;
- *aspace = 0;
- *cycle = 0;
- *dwidth = 0;
-
- if (ctl & TSI148_LCSR_OTAT_EN)
- *enabled = 1;
-
- /* Setup address space */
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
- *aspace |= VME_A16;
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
- *aspace |= VME_A24;
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
- *aspace |= VME_A32;
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
- *aspace |= VME_A64;
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
- *aspace |= VME_CRCSR;
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
- *aspace |= VME_USER1;
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
- *aspace |= VME_USER2;
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
- *aspace |= VME_USER3;
- if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
- *aspace |= VME_USER4;
-
- /* Setup 2eSST speeds */
- if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
- *cycle |= VME_2eSST160;
- if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
- *cycle |= VME_2eSST267;
- if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
- *cycle |= VME_2eSST320;
-
- /* Setup cycle types */
- if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
- *cycle |= VME_SCT;
- if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
- *cycle |= VME_BLT;
- if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
- *cycle |= VME_MBLT;
- if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
- *cycle |= VME_2eVME;
- if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
- *cycle |= VME_2eSST;
- if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
- *cycle |= VME_2eSSTB;
-
- if (ctl & TSI148_LCSR_OTAT_SUP)
- *cycle |= VME_SUPER;
- else
- *cycle |= VME_USER;
-
- if (ctl & TSI148_LCSR_OTAT_PGM)
- *cycle |= VME_PROG;
- else
- *cycle |= VME_DATA;
-
- /* Setup data width */
- if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
- *dwidth = VME_D16;
- if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
- *dwidth = VME_D32;
-
- return 0;
-}
-
-
-static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
-{
- int retval;
-
- spin_lock(&image->lock);
-
- retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
- cycle, dwidth);
-
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
-{
- int retval, enabled;
- unsigned long long vme_base, size;
- u32 aspace, cycle, dwidth;
- struct vme_bus_error *vme_err = NULL;
- struct vme_bridge *tsi148_bridge;
-
- tsi148_bridge = image->parent;
-
- spin_lock(&image->lock);
-
- memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
- retval = count;
-
- if (!err_chk)
- goto skip_chk;
-
- __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
- &dwidth);
-
- vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
- count);
- if (vme_err != NULL) {
- dev_err(image->parent->parent, "First VME read error detected "
- "an at address 0x%llx\n", vme_err->address);
- retval = vme_err->address - (vme_base + offset);
- /* Clear down saved errors in this address range */
- tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
- count);
- }
-
-skip_chk:
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-
-static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
-{
- int retval = 0, enabled;
- unsigned long long vme_base, size;
- u32 aspace, cycle, dwidth;
-
- struct vme_bus_error *vme_err = NULL;
- struct vme_bridge *tsi148_bridge;
- struct tsi148_driver *bridge;
-
- tsi148_bridge = image->parent;
-
- bridge = tsi148_bridge->driver_priv;
-
- spin_lock(&image->lock);
-
- memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
- retval = count;
-
- /*
- * Writes are posted. We need to do a read on the VME bus to flush out
- * all of the writes before we check for errors. We can't guarantee
- * that reading the data we have just written is safe. It is believed
- * that there isn't any read/write re-ordering, so we can read any
- * location in VME space, so let's read the Device ID from the tsi148's
- * own registers as mapped into CR/CSR space.
- *
- * We check for saved errors in the written address range/space.
- */
-
- if (!err_chk)
- goto skip_chk;
-
- /*
- * Get window info first, to maximise the time that the buffers may
- * flush on their own
- */
- __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
- &dwidth);
-
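- /* Offset 0x7F000 presumably places this read in the tsi148's own
- * register group, which sits in the upper 4 kB of its 512 kB CR/CSR
- * slot image, so the read targets the Device ID register */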
- ioread16(bridge->flush_image->kern_base + 0x7F000);
-
- vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
- count);
- if (vme_err != NULL) {
- dev_warn(tsi148_bridge->parent, "First VME write error detected"
- " an at address 0x%llx\n", vme_err->address);
- retval = vme_err->address - (vme_base + offset);
- /* Clear down saved errors in this address range */
- tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
- count);
- }
-
-skip_chk:
- spin_unlock(&image->lock);
-
- return retval;
-}
-
-/*
- * Perform an RMW cycle on the VME bus.
- *
- * Requires a previously configured master window, returns final value.
- */
-static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
- unsigned int mask, unsigned int compare, unsigned int swap,
- loff_t offset)
-{
- unsigned long long pci_addr;
- unsigned int pci_addr_high, pci_addr_low;
- u32 tmp, result;
- int i;
- struct tsi148_driver *bridge;
-
- bridge = image->parent->driver_priv;
-
- /* Find the PCI address that maps to the desired VME address */
- i = image->number;
-
- /* Locking as we can only do one of these at a time */
- mutex_lock(&bridge->vme_rmw);
-
- /* Lock image */
- spin_lock(&image->lock);
-
- pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTSAU);
- pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTSAL);
-
- reg_join(pci_addr_high, pci_addr_low, &pci_addr);
- reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
-
- /* Configure registers */
- iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
- iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
- iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
- iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
- iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
-
- /* Enable RMW */
- tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
- tmp |= TSI148_LCSR_VMCTRL_RMWEN;
- iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
-
- /* Kick process off with a read to the required address. */
- result = ioread32be(image->kern_base + offset);
-
- /* Disable RMW */
- tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
- tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
- iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
-
- spin_unlock(&image->lock);
-
- mutex_unlock(&bridge->vme_rmw);
-
- return result;
-}
-
-static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
- u32 aspace, u32 cycle, u32 dwidth)
-{
- u32 val;
-
- val = be32_to_cpu(*attr);
-
- /* Setup 2eSST speeds */
- switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
- case VME_2eSST160:
- val |= TSI148_LCSR_DSAT_2eSSTM_160;
- break;
- case VME_2eSST267:
- val |= TSI148_LCSR_DSAT_2eSSTM_267;
- break;
- case VME_2eSST320:
- val |= TSI148_LCSR_DSAT_2eSSTM_320;
- break;
- }
-
- /* Setup cycle types */
- if (cycle & VME_SCT)
- val |= TSI148_LCSR_DSAT_TM_SCT;
-
- if (cycle & VME_BLT)
- val |= TSI148_LCSR_DSAT_TM_BLT;
-
- if (cycle & VME_MBLT)
- val |= TSI148_LCSR_DSAT_TM_MBLT;
-
- if (cycle & VME_2eVME)
- val |= TSI148_LCSR_DSAT_TM_2eVME;
-
- if (cycle & VME_2eSST)
- val |= TSI148_LCSR_DSAT_TM_2eSST;
-
- if (cycle & VME_2eSSTB) {
- dev_err(dev, "Currently not setting Broadcast Select "
- "Registers\n");
- val |= TSI148_LCSR_DSAT_TM_2eSSTB;
- }
-
- /* Setup data width */
- switch (dwidth) {
- case VME_D16:
- val |= TSI148_LCSR_DSAT_DBW_16;
- break;
- case VME_D32:
- val |= TSI148_LCSR_DSAT_DBW_32;
- break;
- default:
- dev_err(dev, "Invalid data width\n");
- return -EINVAL;
- }
-
- /* Setup address space */
- switch (aspace) {
- case VME_A16:
- val |= TSI148_LCSR_DSAT_AMODE_A16;
- break;
- case VME_A24:
- val |= TSI148_LCSR_DSAT_AMODE_A24;
- break;
- case VME_A32:
- val |= TSI148_LCSR_DSAT_AMODE_A32;
- break;
- case VME_A64:
- val |= TSI148_LCSR_DSAT_AMODE_A64;
- break;
- case VME_CRCSR:
- val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
- break;
- case VME_USER1:
- val |= TSI148_LCSR_DSAT_AMODE_USER1;
- break;
- case VME_USER2:
- val |= TSI148_LCSR_DSAT_AMODE_USER2;
- break;
- case VME_USER3:
- val |= TSI148_LCSR_DSAT_AMODE_USER3;
- break;
- case VME_USER4:
- val |= TSI148_LCSR_DSAT_AMODE_USER4;
- break;
- default:
- dev_err(dev, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- if (cycle & VME_SUPER)
- val |= TSI148_LCSR_DSAT_SUP;
- if (cycle & VME_PROG)
- val |= TSI148_LCSR_DSAT_PGM;
-
- *attr = cpu_to_be32(val);
-
- return 0;
-}
-
-static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
- u32 aspace, u32 cycle, u32 dwidth)
-{
- u32 val;
-
- val = be32_to_cpu(*attr);
-
- /* Setup 2eSST speeds */
- switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
- case VME_2eSST160:
- val |= TSI148_LCSR_DDAT_2eSSTM_160;
- break;
- case VME_2eSST267:
- val |= TSI148_LCSR_DDAT_2eSSTM_267;
- break;
- case VME_2eSST320:
- val |= TSI148_LCSR_DDAT_2eSSTM_320;
- break;
- }
-
- /* Setup cycle types */
- if (cycle & VME_SCT)
- val |= TSI148_LCSR_DDAT_TM_SCT;
-
- if (cycle & VME_BLT)
- val |= TSI148_LCSR_DDAT_TM_BLT;
-
- if (cycle & VME_MBLT)
- val |= TSI148_LCSR_DDAT_TM_MBLT;
-
- if (cycle & VME_2eVME)
- val |= TSI148_LCSR_DDAT_TM_2eVME;
-
- if (cycle & VME_2eSST)
- val |= TSI148_LCSR_DDAT_TM_2eSST;
-
- if (cycle & VME_2eSSTB) {
- dev_err(dev, "Currently not setting Broadcast Select "
- "Registers\n");
- val |= TSI148_LCSR_DDAT_TM_2eSSTB;
- }
-
- /* Setup data width */
- switch (dwidth) {
- case VME_D16:
- val |= TSI148_LCSR_DDAT_DBW_16;
- break;
- case VME_D32:
- val |= TSI148_LCSR_DDAT_DBW_32;
- break;
- default:
- dev_err(dev, "Invalid data width\n");
- return -EINVAL;
- }
-
- /* Setup address space */
- switch (aspace) {
- case VME_A16:
- val |= TSI148_LCSR_DDAT_AMODE_A16;
- break;
- case VME_A24:
- val |= TSI148_LCSR_DDAT_AMODE_A24;
- break;
- case VME_A32:
- val |= TSI148_LCSR_DDAT_AMODE_A32;
- break;
- case VME_A64:
- val |= TSI148_LCSR_DDAT_AMODE_A64;
- break;
- case VME_CRCSR:
- val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
- break;
- case VME_USER1:
- val |= TSI148_LCSR_DDAT_AMODE_USER1;
- break;
- case VME_USER2:
- val |= TSI148_LCSR_DDAT_AMODE_USER2;
- break;
- case VME_USER3:
- val |= TSI148_LCSR_DDAT_AMODE_USER3;
- break;
- case VME_USER4:
- val |= TSI148_LCSR_DDAT_AMODE_USER4;
- break;
- default:
- dev_err(dev, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- if (cycle & VME_SUPER)
- val |= TSI148_LCSR_DDAT_SUP;
- if (cycle & VME_PROG)
- val |= TSI148_LCSR_DDAT_PGM;
-
- *attr = cpu_to_be32(val);
-
- return 0;
-}
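To make the bit packing concrete, here is a hedged illustration of what the destination attribute word ends up holding for a plain A32, single-cycle, D32 supervisory data transfer; all macros are the ones defined in tsi148.h below, and the descriptor itself stores the value big endian via cpu_to_be32():

	u32 ddat = TSI148_LCSR_DDAT_TYP_VME |	/* destination is the VME bus */
		   TSI148_LCSR_DDAT_TM_SCT |	/* single cycle transfers */
		   TSI148_LCSR_DDAT_DBW_32 |	/* 32-bit data width */
		   TSI148_LCSR_DDAT_AMODE_A32 |	/* A32 address space */
		   TSI148_LCSR_DDAT_SUP;	/* supervisory access */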
-
-/*
- * Add a link list descriptor to the list
- *
- * Note: DMA engine expects the DMA descriptor to be big endian.
- */
-static int tsi148_dma_list_add(struct vme_dma_list *list,
- struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
-{
- struct tsi148_dma_entry *entry, *prev;
- u32 address_high, address_low, val;
- struct vme_dma_pattern *pattern_attr;
- struct vme_dma_pci *pci_attr;
- struct vme_dma_vme *vme_attr;
- int retval = 0;
- struct vme_bridge *tsi148_bridge;
-
- tsi148_bridge = list->parent->parent;
-
- /* Descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
- if (entry == NULL) {
- dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
- "dma resource structure\n");
- retval = -ENOMEM;
- goto err_mem;
- }
-
- /* Test descriptor alignment */
- if ((unsigned long)&entry->descriptor & 0x7) {
- dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
- "byte boundary as required: %p\n",
- &entry->descriptor);
- retval = -EINVAL;
- goto err_align;
- }
-
- /* Given we are going to fill out the structure, we probably don't
- * need to zero it, but better safe than sorry for now.
- */
- memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
-
- /* Fill out source part */
- switch (src->type) {
- case VME_DMA_PATTERN:
- pattern_attr = src->private;
-
- entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
-
- val = TSI148_LCSR_DSAT_TYP_PAT;
-
- /* Default behaviour is 32 bit pattern */
- if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
- val |= TSI148_LCSR_DSAT_PSZ;
-
- /* It seems that the default behaviour is to increment */
- if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
- val |= TSI148_LCSR_DSAT_NIN;
- entry->descriptor.dsat = cpu_to_be32(val);
- break;
- case VME_DMA_PCI:
- pci_attr = src->private;
-
- reg_split((unsigned long long)pci_attr->address, &address_high,
- &address_low);
- entry->descriptor.dsau = cpu_to_be32(address_high);
- entry->descriptor.dsal = cpu_to_be32(address_low);
- entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
- break;
- case VME_DMA_VME:
- vme_attr = src->private;
-
- reg_split((unsigned long long)vme_attr->address, &address_high,
- &address_low);
- entry->descriptor.dsau = cpu_to_be32(address_high);
- entry->descriptor.dsal = cpu_to_be32(address_low);
- entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
-
- retval = tsi148_dma_set_vme_src_attributes(
- tsi148_bridge->parent, &entry->descriptor.dsat,
- vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
- if (retval < 0)
- goto err_source;
- break;
- default:
- dev_err(tsi148_bridge->parent, "Invalid source type\n");
- retval = -EINVAL;
- goto err_source;
- break;
- }
-
- /* Assume last link - this will be over-written by adding another */
- entry->descriptor.dnlau = cpu_to_be32(0);
- entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
-
- /* Fill out destination part */
- switch (dest->type) {
- case VME_DMA_PCI:
- pci_attr = dest->private;
-
- reg_split((unsigned long long)pci_attr->address, &address_high,
- &address_low);
- entry->descriptor.ddau = cpu_to_be32(address_high);
- entry->descriptor.ddal = cpu_to_be32(address_low);
- entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
- break;
- case VME_DMA_VME:
- vme_attr = dest->private;
-
- reg_split((unsigned long long)vme_attr->address, &address_high,
- &address_low);
- entry->descriptor.ddau = cpu_to_be32(address_high);
- entry->descriptor.ddal = cpu_to_be32(address_low);
- entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
-
- retval = tsi148_dma_set_vme_dest_attributes(
- tsi148_bridge->parent, &entry->descriptor.ddat,
- vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
- if (retval < 0)
- goto err_dest;
- break;
- default:
- dev_err(tsi148_bridge->parent, "Invalid destination type\n");
- retval = -EINVAL;
- goto err_dest;
- break;
- }
-
- /* Fill out count */
- entry->descriptor.dcnt = cpu_to_be32((u32)count);
-
- /* Add to list */
- list_add_tail(&entry->list, &list->entries);
-
- /* Fill out previous descriptors "Next Address" */
- if (entry->list.prev != &list->entries) {
- prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
- list);
- /* We need the bus address for the pointer */
- entry->dma_handle = dma_map_single(tsi148_bridge->parent,
- &entry->descriptor,
- sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
-
- reg_split((unsigned long long)entry->dma_handle, &address_high,
- &address_low);
- prev->descriptor.dnlau = cpu_to_be32(address_high);
- prev->descriptor.dnlal = cpu_to_be32(address_low);
-
- }
-
- return 0;
-
-err_dest:
-err_source:
-err_align:
- kfree(entry);
-err_mem:
- return retval;
-}
-
-/*
- * Check to see if the provided DMA channel is busy.
- *
- * Returns 0 while the channel is busy and 1 once it is idle, so the result
- * can be used directly as a wait_event() condition.
- */
-static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
-{
- u32 tmp;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
- TSI148_LCSR_OFFSET_DSTA);
-
- if (tmp & TSI148_LCSR_DSTA_BSY)
- return 0;
- else
- return 1;
-
-}
-
-/*
- * Execute a previously generated link list
- *
- * XXX Need to provide control register configuration.
- */
-static int tsi148_dma_list_exec(struct vme_dma_list *list)
-{
- struct vme_dma_resource *ctrlr;
- int channel, retval = 0;
- struct tsi148_dma_entry *entry;
- u32 bus_addr_high, bus_addr_low;
- u32 val, dctlreg = 0;
- struct vme_bridge *tsi148_bridge;
- struct tsi148_driver *bridge;
-
- ctrlr = list->parent;
-
- tsi148_bridge = ctrlr->parent;
-
- bridge = tsi148_bridge->driver_priv;
-
- mutex_lock(&ctrlr->mtx);
-
- channel = ctrlr->number;
-
- if (!list_empty(&ctrlr->running)) {
- /*
- * XXX We have an active DMA transfer and currently haven't
- * sorted out the mechanism for "pending" DMA transfers.
- * Return busy.
- */
- /* Need to add to pending here */
- mutex_unlock(&ctrlr->mtx);
- return -EBUSY;
- } else {
- list_add(&list->list, &ctrlr->running);
- }
-
- /* Get first bus address and write into registers */
- entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
- list);
-
- entry->dma_handle = dma_map_single(tsi148_bridge->parent,
- &entry->descriptor,
- sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
-
- mutex_unlock(&ctrlr->mtx);
-
- reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
-
- iowrite32be(bus_addr_high, bridge->base +
- TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
- iowrite32be(bus_addr_low, bridge->base +
- TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
-
- dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
- TSI148_LCSR_OFFSET_DCTL);
-
- /* Start the operation */
- iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
- TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
-
- wait_event_interruptible(bridge->dma_queue[channel],
- tsi148_dma_busy(ctrlr->parent, channel));
-
- /*
- * Read status register, this register is valid until we kick off a
- * new transfer.
- */
- val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
- TSI148_LCSR_OFFSET_DSTA);
-
- if (val & TSI148_LCSR_DSTA_VBE) {
- dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
- retval = -EIO;
- }
-
- /* Remove list from running list */
- mutex_lock(&ctrlr->mtx);
- list_del(&list->list);
- mutex_unlock(&ctrlr->mtx);
-
- return retval;
-}
-
-/*
- * Clean up a previously generated link list
- *
- * We have a separate function, don't assume that the chain can't be reused.
- */
-static int tsi148_dma_list_empty(struct vme_dma_list *list)
-{
- struct list_head *pos, *temp;
- struct tsi148_dma_entry *entry;
-
- struct vme_bridge *tsi148_bridge = list->parent->parent;
-
- /* detach and free each entry */
- list_for_each_safe(pos, temp, &list->entries) {
- list_del(pos);
- entry = list_entry(pos, struct tsi148_dma_entry, list);
-
- dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
- sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
- kfree(entry);
- }
-
- return 0;
-}
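For orientation, a hedged sketch of how a client driver would exercise the three list operations above through the framework API; "dma_res" (a requested DMA resource) and "buf_dma" (the dma_addr_t of a mapped buffer) are assumptions, the vme_* helpers are those exported by the VME core of this period, and error handling is trimmed:

	struct vme_dma_list *list;
	struct vme_dma_attr *src, *dest;
	int ret;

	list = vme_new_dma_list(dma_res);
	src = vme_dma_vme_attribute(0x20000000, VME_A32, VME_SCT, VME_D32);
	dest = vme_dma_pci_attribute(buf_dma);

	/* Queue a 4kB VME -> PCI transfer, then run the list */
	ret = vme_dma_list_add(list, src, dest, 0x1000);
	if (!ret)
		ret = vme_dma_list_exec(list);

	vme_dma_free_attribute(src);
	vme_dma_free_attribute(dest);
	vme_dma_list_free(list);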
-
-/*
- * All 4 location monitors reside at the same base - this is therefore a
- * system wide configuration.
- *
- * This does not enable the LM monitor - that should be done when the first
- * callback is attached and disabled when the last callback is removed.
- */
-static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
- u32 aspace, u32 cycle)
-{
- u32 lm_base_high, lm_base_low, lm_ctl = 0;
- int i;
- struct vme_bridge *tsi148_bridge;
- struct tsi148_driver *bridge;
-
- tsi148_bridge = lm->parent;
-
- bridge = tsi148_bridge->driver_priv;
-
- mutex_lock(&lm->mtx);
-
- /* If we already have a callback attached, we can't move it! */
- for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
- mutex_unlock(&lm->mtx);
- dev_err(tsi148_bridge->parent, "Location monitor "
- "callback attached, can't reset\n");
- return -EBUSY;
- }
- }
-
- switch (aspace) {
- case VME_A16:
- lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
- break;
- case VME_A24:
- lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
- break;
- case VME_A32:
- lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
- break;
- case VME_A64:
- lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
- break;
- default:
- mutex_unlock(&lm->mtx);
- dev_err(tsi148_bridge->parent, "Invalid address space\n");
- return -EINVAL;
- break;
- }
-
- if (cycle & VME_SUPER)
- lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
- if (cycle & VME_USER)
- lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
- if (cycle & VME_PROG)
- lm_ctl |= TSI148_LCSR_LMAT_PGM;
- if (cycle & VME_DATA)
- lm_ctl |= TSI148_LCSR_LMAT_DATA;
-
- reg_split(lm_base, &lm_base_high, &lm_base_low);
-
- iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
- iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
- iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
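A hedged sketch of the corresponding framework-level usage, showing how a client would configure and arm one of the four monitors; "vdev", "my_lm_callback" and "my_driver_setup_lm" are illustrative names, and the vme_lm_* helpers (with the void (*)(int) callback prototype used by this bridge) are assumed to match the framework headers of this era:

	static void my_lm_callback(int monitor)
	{
		pr_info("location monitor %d triggered\n", monitor);
	}

	static int my_driver_setup_lm(struct vme_dev *vdev)
	{
		struct vme_resource *lm_res;

		lm_res = vme_lm_request(vdev);
		if (!lm_res)
			return -ENODEV;

		/* Watch an A32 window for non-privileged data accesses */
		vme_lm_set(lm_res, 0x60000000, VME_A32, VME_USER | VME_DATA);

		/* React to hits on monitor 0 (of the 4 provided by the tsi148) */
		return vme_lm_attach(lm_res, 0, my_lm_callback);
	}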
-
-/* Get configuration of the location monitor and return whether it is enabled
- * or disabled.
- */
-static int tsi148_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, u32 *aspace, u32 *cycle)
-{
- u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
- struct tsi148_driver *bridge;
-
- bridge = lm->parent->driver_priv;
-
- mutex_lock(&lm->mtx);
-
- lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
- lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
- lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
-
- reg_join(lm_base_high, lm_base_low, lm_base);
-
- if (lm_ctl & TSI148_LCSR_LMAT_EN)
- enabled = 1;
-
- if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
- *aspace |= VME_A16;
-
- if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
- *aspace |= VME_A24;
-
- if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
- *aspace |= VME_A32;
-
- if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
- *aspace |= VME_A64;
-
-
- if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
- *cycle |= VME_SUPER;
- if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
- *cycle |= VME_USER;
- if (lm_ctl & TSI148_LCSR_LMAT_PGM)
- *cycle |= VME_PROG;
- if (lm_ctl & TSI148_LCSR_LMAT_DATA)
- *cycle |= VME_DATA;
-
- mutex_unlock(&lm->mtx);
-
- return enabled;
-}
-
-/*
- * Attach a callback to a specific location monitor.
- *
- * Callback will be passed the monitor triggered.
- */
-static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
- void (*callback)(int))
-{
- u32 lm_ctl, tmp;
- struct vme_bridge *tsi148_bridge;
- struct tsi148_driver *bridge;
-
- tsi148_bridge = lm->parent;
-
- bridge = tsi148_bridge->driver_priv;
-
- mutex_lock(&lm->mtx);
-
- /* Ensure that the location monitor is configured - need PGM or DATA */
- lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
- if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
- mutex_unlock(&lm->mtx);
- dev_err(tsi148_bridge->parent, "Location monitor not properly "
- "configured\n");
- return -EINVAL;
- }
-
- /* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
- mutex_unlock(&lm->mtx);
- dev_err(tsi148_bridge->parent, "Existing callback attached\n");
- return -EBUSY;
- }
-
- /* Attach callback */
- bridge->lm_callback[monitor] = callback;
-
- /* Enable Location Monitor interrupt */
- tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
- tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
-
- tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
- tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
-
- /* Ensure that global Location Monitor Enable set */
- if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
- lm_ctl |= TSI148_LCSR_LMAT_EN;
- iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
- }
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
-
-/*
- * Detach a callback function from a specific location monitor.
- */
-static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
-{
- u32 lm_en, tmp;
- struct tsi148_driver *bridge;
-
- bridge = lm->parent->driver_priv;
-
- mutex_lock(&lm->mtx);
-
- /* Disable Location Monitor and ensure previous interrupts are clear */
- lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
- lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
- iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
-
- tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
- tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
- iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
-
- iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
- bridge->base + TSI148_LCSR_INTC);
-
- /* Detach callback */
- bridge->lm_callback[monitor] = NULL;
-
- /* If all location monitors disabled, disable global Location Monitor */
- if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
- TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
- tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
- tmp &= ~TSI148_LCSR_LMAT_EN;
- iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
- }
-
- mutex_unlock(&lm->mtx);
-
- return 0;
-}
-
-/*
- * Determine Geographical Addressing
- */
-static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
-{
- u32 slot = 0;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- if (!geoid) {
- slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
- slot = slot & TSI148_LCSR_VSTAT_GA_M;
- } else
- slot = geoid;
-
- return (int)slot;
-}
-
-void *tsi148_alloc_consistent(struct device *parent, size_t size,
- dma_addr_t *dma)
-{
- struct pci_dev *pdev;
-
- /* Find pci_dev container of dev */
- pdev = container_of(parent, struct pci_dev, dev);
-
- return pci_alloc_consistent(pdev, size, dma);
-}
-
-void tsi148_free_consistent(struct device *parent, size_t size, void *vaddr,
- dma_addr_t dma)
-{
- struct pci_dev *pdev;
-
- /* Find pci_dev container of dev */
- pdev = container_of(parent, struct pci_dev, dev);
-
- pci_free_consistent(pdev, size, vaddr, dma);
-}
-
-static int __init tsi148_init(void)
-{
- return pci_register_driver(&tsi148_driver);
-}
-
-/*
- * Configure CR/CSR space
- *
- * Access to the CR/CSR can be configured at power-up. The location of the
- * CR/CSR registers in the CR/CSR address space is determined by the board's
- * Auto-ID or Geographic address. This function ensures that the window is
- * enabled at an offset consistent with the board's geographic address.
- *
- * Each board has a 512kB window, with the highest 4kB being used for the
- * board's registers; this means there is a fixed-length 508kB window which
- * must be mapped onto PCI memory.
- */
-static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
-{
- u32 cbar, crat, vstat;
- u32 crcsr_bus_high, crcsr_bus_low;
- int retval;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- /* Allocate mem for CR/CSR image */
- bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
- &bridge->crcsr_bus);
- if (bridge->crcsr_kernel == NULL) {
- dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
- "CR/CSR image\n");
- return -ENOMEM;
- }
-
- memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
-
- reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
-
- iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
- iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
-
- /* Ensure that the CR/CSR is configured at the correct offset */
- cbar = ioread32be(bridge->base + TSI148_CBAR);
- cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
-
- vstat = tsi148_slot_get(tsi148_bridge);
-
- if (cbar != vstat) {
- cbar = vstat;
- dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
- iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
- }
- dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
-
- crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
- if (crat & TSI148_LCSR_CRAT_EN) {
- dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
- iowrite32be(crat | TSI148_LCSR_CRAT_EN,
- bridge->base + TSI148_LCSR_CRAT);
- } else
- dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
-
- /* If we want flushed, error-checked writes, set up a window
- * over the CR/CSR registers. We read from here to safely flush
- * through VME writes.
- */
- if (err_chk) {
- retval = tsi148_master_set(bridge->flush_image, 1,
- (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
- VME_D16);
- if (retval)
- dev_err(tsi148_bridge->parent, "Configuring flush image"
- " failed\n");
- }
-
- return 0;
-
-}
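To make the geometry described above concrete, a hedged back-of-the-envelope illustration of the CR/CSR offsets for a board in an assumed slot number "slot" (the numbers follow directly from the 512kB-per-board layout; this is not part of the driver):

	unsigned long long win_base = slot * 0x80000ULL;	/* 512kB window per board    */
	unsigned long long regs_base = win_base + 0x7F000ULL;	/* top 4kB: board registers  */
	size_t mappable = 0x7F000;				/* fixed 508kB below that    */

This is also why the error-check flush read in tsi148_master_write() targets kern_base + 0x7F000: it lands in the board's own register block at the top of its CR/CSR window.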
-
-static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
-{
- u32 crat;
- struct tsi148_driver *bridge;
-
- bridge = tsi148_bridge->driver_priv;
-
- /* Turn off CR/CSR space */
- crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
- iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
- bridge->base + TSI148_LCSR_CRAT);
-
- /* Free image */
- iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
- iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
-
- pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
- bridge->crcsr_bus);
-}
-
-static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- int retval, i, master_num;
- u32 data;
- struct list_head *pos = NULL;
- struct vme_bridge *tsi148_bridge;
- struct tsi148_driver *tsi148_device;
- struct vme_master_resource *master_image;
- struct vme_slave_resource *slave_image;
- struct vme_dma_resource *dma_ctrlr;
- struct vme_lm_resource *lm;
-
- /* If we want to support more than one of each bridge, we need to
- * dynamically generate this so we get one per device
- */
- tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
- if (tsi148_bridge == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
- retval = -ENOMEM;
- goto err_struct;
- }
-
- tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
- if (tsi148_device == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
- retval = -ENOMEM;
- goto err_driver;
- }
-
- tsi148_bridge->driver_priv = tsi148_device;
-
- /* Enable the device */
- retval = pci_enable_device(pdev);
- if (retval) {
- dev_err(&pdev->dev, "Unable to enable device\n");
- goto err_enable;
- }
-
- /* Map Registers */
- retval = pci_request_regions(pdev, driver_name);
- if (retval) {
- dev_err(&pdev->dev, "Unable to reserve resources\n");
- goto err_resource;
- }
-
- /* map registers in BAR 0 */
- tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
- 4096);
- if (!tsi148_device->base) {
- dev_err(&pdev->dev, "Unable to remap CRG region\n");
- retval = -EIO;
- goto err_remap;
- }
-
- /* Check to see if the mapping worked out */
- data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
- if (data != PCI_VENDOR_ID_TUNDRA) {
- dev_err(&pdev->dev, "CRG region check failed\n");
- retval = -EIO;
- goto err_test;
- }
-
- /* Initialize wait queues & mutual exclusion flags */
- init_waitqueue_head(&tsi148_device->dma_queue[0]);
- init_waitqueue_head(&tsi148_device->dma_queue[1]);
- init_waitqueue_head(&tsi148_device->iack_queue);
- mutex_init(&tsi148_device->vme_int);
- mutex_init(&tsi148_device->vme_rmw);
-
- tsi148_bridge->parent = &pdev->dev;
- strcpy(tsi148_bridge->name, driver_name);
-
- /* Setup IRQ */
- retval = tsi148_irq_init(tsi148_bridge);
- if (retval != 0) {
- dev_err(&pdev->dev, "Chip Initialization failed.\n");
- goto err_irq;
- }
-
- /* If we are going to flush writes, we need to read from the VME bus.
- * We need to do this safely, thus we read the devices own CR/CSR
- * register. To do this we must set up a window in CR/CSR space and
- * hence have one less master window resource available.
- */
- master_num = TSI148_MAX_MASTER;
- if (err_chk) {
- master_num--;
-
- tsi148_device->flush_image =
- kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
- if (tsi148_device->flush_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "flush resource structure\n");
- retval = -ENOMEM;
- goto err_master;
- }
- tsi148_device->flush_image->parent = tsi148_bridge;
- spin_lock_init(&tsi148_device->flush_image->lock);
- tsi148_device->flush_image->locked = 1;
- tsi148_device->flush_image->number = master_num;
- tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
- VME_A32 | VME_A64;
- tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
- VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
- VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
- VME_USER | VME_PROG | VME_DATA;
- tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
- memset(&tsi148_device->flush_image->bus_resource, 0,
- sizeof(struct resource));
- tsi148_device->flush_image->kern_base = NULL;
- }
-
- /* Add master windows to list */
- INIT_LIST_HEAD(&tsi148_bridge->master_resources);
- for (i = 0; i < master_num; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "master resource structure\n");
- retval = -ENOMEM;
- goto err_master;
- }
- master_image->parent = tsi148_bridge;
- spin_lock_init(&master_image->lock);
- master_image->locked = 0;
- master_image->number = i;
- master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
- VME_A64;
- master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
- VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
- VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
- VME_PROG | VME_DATA;
- master_image->width_attr = VME_D16 | VME_D32;
- memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
- master_image->kern_base = NULL;
- list_add_tail(&master_image->list,
- &tsi148_bridge->master_resources);
- }
-
- /* Add slave windows to list */
- INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
- for (i = 0; i < TSI148_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "slave resource structure\n");
- retval = -ENOMEM;
- goto err_slave;
- }
- slave_image->parent = tsi148_bridge;
- mutex_init(&slave_image->mtx);
- slave_image->locked = 0;
- slave_image->number = i;
- slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
- VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
- VME_USER3 | VME_USER4;
- slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
- VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
- VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
- VME_PROG | VME_DATA;
- list_add_tail(&slave_image->list,
- &tsi148_bridge->slave_resources);
- }
-
- /* Add dma engines to list */
- INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
- for (i = 0; i < TSI148_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
- GFP_KERNEL);
- if (dma_ctrlr == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "dma resource structure\n");
- retval = -ENOMEM;
- goto err_dma;
- }
- dma_ctrlr->parent = tsi148_bridge;
- mutex_init(&dma_ctrlr->mtx);
- dma_ctrlr->locked = 0;
- dma_ctrlr->number = i;
- dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
- VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
- VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
- VME_DMA_PATTERN_TO_MEM;
- INIT_LIST_HEAD(&dma_ctrlr->pending);
- INIT_LIST_HEAD(&dma_ctrlr->running);
- list_add_tail(&dma_ctrlr->list,
- &tsi148_bridge->dma_resources);
- }
-
- /* Add location monitor to list */
- INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "location monitor resource structure\n");
- retval = -ENOMEM;
- goto err_lm;
- }
- lm->parent = tsi148_bridge;
- mutex_init(&lm->mtx);
- lm->locked = 0;
- lm->number = 1;
- lm->monitors = 4;
- list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
-
- tsi148_bridge->slave_get = tsi148_slave_get;
- tsi148_bridge->slave_set = tsi148_slave_set;
- tsi148_bridge->master_get = tsi148_master_get;
- tsi148_bridge->master_set = tsi148_master_set;
- tsi148_bridge->master_read = tsi148_master_read;
- tsi148_bridge->master_write = tsi148_master_write;
- tsi148_bridge->master_rmw = tsi148_master_rmw;
- tsi148_bridge->dma_list_add = tsi148_dma_list_add;
- tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
- tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
- tsi148_bridge->irq_set = tsi148_irq_set;
- tsi148_bridge->irq_generate = tsi148_irq_generate;
- tsi148_bridge->lm_set = tsi148_lm_set;
- tsi148_bridge->lm_get = tsi148_lm_get;
- tsi148_bridge->lm_attach = tsi148_lm_attach;
- tsi148_bridge->lm_detach = tsi148_lm_detach;
- tsi148_bridge->slot_get = tsi148_slot_get;
- tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
- tsi148_bridge->free_consistent = tsi148_free_consistent;
-
- data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
- dev_info(&pdev->dev, "Board is%s the VME system controller\n",
- (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
- if (!geoid)
- dev_info(&pdev->dev, "VME geographical address is %d\n",
- data & TSI148_LCSR_VSTAT_GA_M);
- else
- dev_info(&pdev->dev, "VME geographical address is set to %d\n",
- geoid);
-
- dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
- err_chk ? "enabled" : "disabled");
-
- retval = tsi148_crcsr_init(tsi148_bridge, pdev);
- if (retval) {
- dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
- goto err_crcsr;
- }
-
- retval = vme_register_bridge(tsi148_bridge);
- if (retval != 0) {
- dev_err(&pdev->dev, "Chip Registration failed.\n");
- goto err_reg;
- }
-
- pci_set_drvdata(pdev, tsi148_bridge);
-
- /* Clear VME bus "board fail", and "power-up reset" lines */
- data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
- data &= ~TSI148_LCSR_VSTAT_BRDFL;
- data |= TSI148_LCSR_VSTAT_CPURST;
- iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
-
- return 0;
-
-err_reg:
- tsi148_crcsr_exit(tsi148_bridge, pdev);
-err_crcsr:
-err_lm:
- /* resources are stored in link list */
- list_for_each(pos, &tsi148_bridge->lm_resources) {
- lm = list_entry(pos, struct vme_lm_resource, list);
- list_del(pos);
- kfree(lm);
- }
-err_dma:
- /* resources are stored in link list */
- list_for_each(pos, &tsi148_bridge->dma_resources) {
- dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
- list_del(pos);
- kfree(dma_ctrlr);
- }
-err_slave:
- /* resources are stored in link list */
- list_for_each(pos, &tsi148_bridge->slave_resources) {
- slave_image = list_entry(pos, struct vme_slave_resource, list);
- list_del(pos);
- kfree(slave_image);
- }
-err_master:
- /* resources are stored in link list */
- list_for_each(pos, &tsi148_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
- list_del(pos);
- kfree(master_image);
- }
-
- tsi148_irq_exit(tsi148_bridge, pdev);
-err_irq:
-err_test:
- iounmap(tsi148_device->base);
-err_remap:
- pci_release_regions(pdev);
-err_resource:
- pci_disable_device(pdev);
-err_enable:
- kfree(tsi148_device);
-err_driver:
- kfree(tsi148_bridge);
-err_struct:
- return retval;
-
-}
-
-static void tsi148_remove(struct pci_dev *pdev)
-{
- struct list_head *pos = NULL;
- struct list_head *tmplist;
- struct vme_master_resource *master_image;
- struct vme_slave_resource *slave_image;
- struct vme_dma_resource *dma_ctrlr;
- int i;
- struct tsi148_driver *bridge;
- struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
-
- bridge = tsi148_bridge->driver_priv;
-
-
- dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
-
- /*
- * Shutdown all inbound and outbound windows.
- */
- for (i = 0; i < 8; i++) {
- iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
- TSI148_LCSR_OFFSET_ITAT);
- iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
- TSI148_LCSR_OFFSET_OTAT);
- }
-
- /*
- * Shutdown Location monitor.
- */
- iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
-
- /*
- * Shutdown CRG map.
- */
- iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
-
- /*
- * Clear error status.
- */
- iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
- iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
- iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
-
- /*
- * Remove VIRQ interrupt (if any)
- */
- if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
- iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
-
- /*
- * Map all Interrupts to PCI INTA
- */
- iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
- iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
-
- tsi148_irq_exit(tsi148_bridge, pdev);
-
- vme_unregister_bridge(tsi148_bridge);
-
- tsi148_crcsr_exit(tsi148_bridge, pdev);
-
- /* resources are stored in link list */
- list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
- dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
- list_del(pos);
- kfree(dma_ctrlr);
- }
-
- /* resources are stored in link list */
- list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
- slave_image = list_entry(pos, struct vme_slave_resource, list);
- list_del(pos);
- kfree(slave_image);
- }
-
- /* resources are stored in link list */
- list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
- list_del(pos);
- kfree(master_image);
- }
-
- iounmap(bridge->base);
-
- pci_release_regions(pdev);
-
- pci_disable_device(pdev);
-
- kfree(tsi148_bridge->driver_priv);
-
- kfree(tsi148_bridge);
-}
-
-static void __exit tsi148_exit(void)
-{
- pci_unregister_driver(&tsi148_driver);
-}
-
-MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
-module_param(err_chk, bool, 0);
-
-MODULE_PARM_DESC(geoid, "Override geographical addressing");
-module_param(geoid, int, 0);
-
-MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
-MODULE_LICENSE("GPL");
-
-module_init(tsi148_init);
-module_exit(tsi148_exit);
+++ /dev/null
-/*
- * tsi148.h
- *
- * Support for the Tundra TSI148 VME Bridge chip
- *
- * Author: Tom Armistead
- * Updated and maintained by Ajit Prem
- * Copyright 2004 Motorola Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef TSI148_H
-#define TSI148_H
-
-#ifndef PCI_VENDOR_ID_TUNDRA
-#define PCI_VENDOR_ID_TUNDRA 0x10e3
-#endif
-
-#ifndef PCI_DEVICE_ID_TUNDRA_TSI148
-#define PCI_DEVICE_ID_TUNDRA_TSI148 0x148
-#endif
-
-/*
- * Define the number of each that the Tsi148 supports.
- */
-#define TSI148_MAX_MASTER 8 /* Max Master Windows */
-#define TSI148_MAX_SLAVE 8 /* Max Slave Windows */
-#define TSI148_MAX_DMA 2 /* Max DMA Controllers */
-#define TSI148_MAX_MAILBOX 4 /* Max Mail Box registers */
-#define TSI148_MAX_SEMAPHORE 8 /* Max Semaphores */
-
-/* Structure used to hold driver specific information */
-struct tsi148_driver {
- void __iomem *base; /* Base Address of device registers */
- wait_queue_head_t dma_queue[2];
- wait_queue_head_t iack_queue;
- void (*lm_callback[4])(int); /* Called in interrupt handler */
- void *crcsr_kernel;
- dma_addr_t crcsr_bus;
- struct vme_master_resource *flush_image;
- struct mutex vme_rmw; /* Only one RMW cycle at a time */
- struct mutex vme_int; /*
- * Only one VME interrupt can be
- * generated at a time, provide locking
- */
-};
-
-/*
- * Layout of a DMAC Linked-List Descriptor
- *
- * Note: This structure is accessed via the chip and therefore must be
- * correctly laid out - It must also be aligned on 64-bit boundaries.
- */
-struct tsi148_dma_descriptor {
- __be32 dsau; /* Source Address */
- __be32 dsal;
- __be32 ddau; /* Destination Address */
- __be32 ddal;
- __be32 dsat; /* Source attributes */
- __be32 ddat; /* Destination attributes */
- __be32 dnlau; /* Next link address */
- __be32 dnlal;
- __be32 dcnt; /* Byte count */
- __be32 ddbs; /* 2eSST Broadcast select */
-};
-
-struct tsi148_dma_entry {
- /*
- * The descriptor needs to be aligned on a 64-bit boundary, we increase
- * the chance of this by putting it first in the structure.
- */
- struct tsi148_dma_descriptor descriptor;
- struct list_head list;
- dma_addr_t dma_handle;
-};
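If one wanted to make the layout and alignment expectations above explicit at build time, a hedged sketch (not in the original driver; the helper name is illustrative) could assert them with BUILD_BUG_ON:

	static inline void tsi148_check_dma_layout(void)
	{
		/* 10 big-endian 32-bit words, packed exactly as the DMAC expects */
		BUILD_BUG_ON(sizeof(struct tsi148_dma_descriptor) != 10 * sizeof(__be32));
		/* Descriptor first, so kmalloc()'s natural alignment applies to it */
		BUILD_BUG_ON(offsetof(struct tsi148_dma_entry, descriptor) != 0);
	}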
-
-/*
- * TSI148 ASIC register structure overlays and bit field definitions.
- *
- * Note: Tsi148 Register Group (CRG) consists of the following
- * combination of registers:
- * PCFS - PCI Configuration Space Registers
- * LCSR - Local Control and Status Registers
- * GCSR - Global Control and Status Registers
- * CR/CSR - Subset of Configuration ROM /
- * Control and Status Registers
- */
-
-
-/*
- * Command/Status Registers (CRG + $004)
- */
-#define TSI148_PCFS_ID 0x0
-#define TSI148_PCFS_CSR 0x4
-#define TSI148_PCFS_CLASS 0x8
-#define TSI148_PCFS_MISC0 0xC
-#define TSI148_PCFS_MBARL 0x10
-#define TSI148_PCFS_MBARU 0x14
-
-#define TSI148_PCFS_SUBID 0x28
-
-#define TSI148_PCFS_CAPP 0x34
-
-#define TSI148_PCFS_MISC1 0x3C
-
-#define TSI148_PCFS_XCAPP 0x40
-#define TSI148_PCFS_XSTAT 0x44
-
-/*
- * LCSR definitions
- */
-
-/*
- * Outbound Translations
- */
-#define TSI148_LCSR_OT0_OTSAU 0x100
-#define TSI148_LCSR_OT0_OTSAL 0x104
-#define TSI148_LCSR_OT0_OTEAU 0x108
-#define TSI148_LCSR_OT0_OTEAL 0x10C
-#define TSI148_LCSR_OT0_OTOFU 0x110
-#define TSI148_LCSR_OT0_OTOFL 0x114
-#define TSI148_LCSR_OT0_OTBS 0x118
-#define TSI148_LCSR_OT0_OTAT 0x11C
-
-#define TSI148_LCSR_OT1_OTSAU 0x120
-#define TSI148_LCSR_OT1_OTSAL 0x124
-#define TSI148_LCSR_OT1_OTEAU 0x128
-#define TSI148_LCSR_OT1_OTEAL 0x12C
-#define TSI148_LCSR_OT1_OTOFU 0x130
-#define TSI148_LCSR_OT1_OTOFL 0x134
-#define TSI148_LCSR_OT1_OTBS 0x138
-#define TSI148_LCSR_OT1_OTAT 0x13C
-
-#define TSI148_LCSR_OT2_OTSAU 0x140
-#define TSI148_LCSR_OT2_OTSAL 0x144
-#define TSI148_LCSR_OT2_OTEAU 0x148
-#define TSI148_LCSR_OT2_OTEAL 0x14C
-#define TSI148_LCSR_OT2_OTOFU 0x150
-#define TSI148_LCSR_OT2_OTOFL 0x154
-#define TSI148_LCSR_OT2_OTBS 0x158
-#define TSI148_LCSR_OT2_OTAT 0x15C
-
-#define TSI148_LCSR_OT3_OTSAU 0x160
-#define TSI148_LCSR_OT3_OTSAL 0x164
-#define TSI148_LCSR_OT3_OTEAU 0x168
-#define TSI148_LCSR_OT3_OTEAL 0x16C
-#define TSI148_LCSR_OT3_OTOFU 0x170
-#define TSI148_LCSR_OT3_OTOFL 0x174
-#define TSI148_LCSR_OT3_OTBS 0x178
-#define TSI148_LCSR_OT3_OTAT 0x17C
-
-#define TSI148_LCSR_OT4_OTSAU 0x180
-#define TSI148_LCSR_OT4_OTSAL 0x184
-#define TSI148_LCSR_OT4_OTEAU 0x188
-#define TSI148_LCSR_OT4_OTEAL 0x18C
-#define TSI148_LCSR_OT4_OTOFU 0x190
-#define TSI148_LCSR_OT4_OTOFL 0x194
-#define TSI148_LCSR_OT4_OTBS 0x198
-#define TSI148_LCSR_OT4_OTAT 0x19C
-
-#define TSI148_LCSR_OT5_OTSAU 0x1A0
-#define TSI148_LCSR_OT5_OTSAL 0x1A4
-#define TSI148_LCSR_OT5_OTEAU 0x1A8
-#define TSI148_LCSR_OT5_OTEAL 0x1AC
-#define TSI148_LCSR_OT5_OTOFU 0x1B0
-#define TSI148_LCSR_OT5_OTOFL 0x1B4
-#define TSI148_LCSR_OT5_OTBS 0x1B8
-#define TSI148_LCSR_OT5_OTAT 0x1BC
-
-#define TSI148_LCSR_OT6_OTSAU 0x1C0
-#define TSI148_LCSR_OT6_OTSAL 0x1C4
-#define TSI148_LCSR_OT6_OTEAU 0x1C8
-#define TSI148_LCSR_OT6_OTEAL 0x1CC
-#define TSI148_LCSR_OT6_OTOFU 0x1D0
-#define TSI148_LCSR_OT6_OTOFL 0x1D4
-#define TSI148_LCSR_OT6_OTBS 0x1D8
-#define TSI148_LCSR_OT6_OTAT 0x1DC
-
-#define TSI148_LCSR_OT7_OTSAU 0x1E0
-#define TSI148_LCSR_OT7_OTSAL 0x1E4
-#define TSI148_LCSR_OT7_OTEAU 0x1E8
-#define TSI148_LCSR_OT7_OTEAL 0x1EC
-#define TSI148_LCSR_OT7_OTOFU 0x1F0
-#define TSI148_LCSR_OT7_OTOFL 0x1F4
-#define TSI148_LCSR_OT7_OTBS 0x1F8
-#define TSI148_LCSR_OT7_OTAT 0x1FC
-
-#define TSI148_LCSR_OT0 0x100
-#define TSI148_LCSR_OT1 0x120
-#define TSI148_LCSR_OT2 0x140
-#define TSI148_LCSR_OT3 0x160
-#define TSI148_LCSR_OT4 0x180
-#define TSI148_LCSR_OT5 0x1A0
-#define TSI148_LCSR_OT6 0x1C0
-#define TSI148_LCSR_OT7 0x1E0
-
-static const int TSI148_LCSR_OT[8] = { TSI148_LCSR_OT0, TSI148_LCSR_OT1,
- TSI148_LCSR_OT2, TSI148_LCSR_OT3,
- TSI148_LCSR_OT4, TSI148_LCSR_OT5,
- TSI148_LCSR_OT6, TSI148_LCSR_OT7 };
-
-#define TSI148_LCSR_OFFSET_OTSAU 0x0
-#define TSI148_LCSR_OFFSET_OTSAL 0x4
-#define TSI148_LCSR_OFFSET_OTEAU 0x8
-#define TSI148_LCSR_OFFSET_OTEAL 0xC
-#define TSI148_LCSR_OFFSET_OTOFU 0x10
-#define TSI148_LCSR_OFFSET_OTOFL 0x14
-#define TSI148_LCSR_OFFSET_OTBS 0x18
-#define TSI148_LCSR_OFFSET_OTAT 0x1C
-
-/*
- * VMEbus interrupt ack
- * offset 200
- */
-#define TSI148_LCSR_VIACK1 0x204
-#define TSI148_LCSR_VIACK2 0x208
-#define TSI148_LCSR_VIACK3 0x20C
-#define TSI148_LCSR_VIACK4 0x210
-#define TSI148_LCSR_VIACK5 0x214
-#define TSI148_LCSR_VIACK6 0x218
-#define TSI148_LCSR_VIACK7 0x21C
-
-static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1,
- TSI148_LCSR_VIACK2, TSI148_LCSR_VIACK3,
- TSI148_LCSR_VIACK4, TSI148_LCSR_VIACK5,
- TSI148_LCSR_VIACK6, TSI148_LCSR_VIACK7 };
-
-/*
- * RMW
- * offset 220
- */
-#define TSI148_LCSR_RMWAU 0x220
-#define TSI148_LCSR_RMWAL 0x224
-#define TSI148_LCSR_RMWEN 0x228
-#define TSI148_LCSR_RMWC 0x22C
-#define TSI148_LCSR_RMWS 0x230
-
-/*
- * VMEbus control
- * offset 234
- */
-#define TSI148_LCSR_VMCTRL 0x234
-#define TSI148_LCSR_VCTRL 0x238
-#define TSI148_LCSR_VSTAT 0x23C
-
-/*
- * PCI status
- * offset 240
- */
-#define TSI148_LCSR_PSTAT 0x240
-
-/*
- * VME filter.
- * offset 250
- */
-#define TSI148_LCSR_VMEFL 0x250
-
- /*
- * VME exception.
- * offset 260
- */
-#define TSI148_LCSR_VEAU 0x260
-#define TSI148_LCSR_VEAL 0x264
-#define TSI148_LCSR_VEAT 0x268
-
- /*
- * PCI error
- * offset 270
- */
-#define TSI148_LCSR_EDPAU 0x270
-#define TSI148_LCSR_EDPAL 0x274
-#define TSI148_LCSR_EDPXA 0x278
-#define TSI148_LCSR_EDPXS 0x27C
-#define TSI148_LCSR_EDPAT 0x280
-
- /*
- * Inbound Translations
- * offset 300
- */
-#define TSI148_LCSR_IT0_ITSAU 0x300
-#define TSI148_LCSR_IT0_ITSAL 0x304
-#define TSI148_LCSR_IT0_ITEAU 0x308
-#define TSI148_LCSR_IT0_ITEAL 0x30C
-#define TSI148_LCSR_IT0_ITOFU 0x310
-#define TSI148_LCSR_IT0_ITOFL 0x314
-#define TSI148_LCSR_IT0_ITAT 0x318
-
-#define TSI148_LCSR_IT1_ITSAU 0x320
-#define TSI148_LCSR_IT1_ITSAL 0x324
-#define TSI148_LCSR_IT1_ITEAU 0x328
-#define TSI148_LCSR_IT1_ITEAL 0x32C
-#define TSI148_LCSR_IT1_ITOFU 0x330
-#define TSI148_LCSR_IT1_ITOFL 0x334
-#define TSI148_LCSR_IT1_ITAT 0x338
-
-#define TSI148_LCSR_IT2_ITSAU 0x340
-#define TSI148_LCSR_IT2_ITSAL 0x344
-#define TSI148_LCSR_IT2_ITEAU 0x348
-#define TSI148_LCSR_IT2_ITEAL 0x34C
-#define TSI148_LCSR_IT2_ITOFU 0x350
-#define TSI148_LCSR_IT2_ITOFL 0x354
-#define TSI148_LCSR_IT2_ITAT 0x358
-
-#define TSI148_LCSR_IT3_ITSAU 0x360
-#define TSI148_LCSR_IT3_ITSAL 0x364
-#define TSI148_LCSR_IT3_ITEAU 0x368
-#define TSI148_LCSR_IT3_ITEAL 0x36C
-#define TSI148_LCSR_IT3_ITOFU 0x370
-#define TSI148_LCSR_IT3_ITOFL 0x374
-#define TSI148_LCSR_IT3_ITAT 0x378
-
-#define TSI148_LCSR_IT4_ITSAU 0x380
-#define TSI148_LCSR_IT4_ITSAL 0x384
-#define TSI148_LCSR_IT4_ITEAU 0x388
-#define TSI148_LCSR_IT4_ITEAL 0x38C
-#define TSI148_LCSR_IT4_ITOFU 0x390
-#define TSI148_LCSR_IT4_ITOFL 0x394
-#define TSI148_LCSR_IT4_ITAT 0x398
-
-#define TSI148_LCSR_IT5_ITSAU 0x3A0
-#define TSI148_LCSR_IT5_ITSAL 0x3A4
-#define TSI148_LCSR_IT5_ITEAU 0x3A8
-#define TSI148_LCSR_IT5_ITEAL 0x3AC
-#define TSI148_LCSR_IT5_ITOFU 0x3B0
-#define TSI148_LCSR_IT5_ITOFL 0x3B4
-#define TSI148_LCSR_IT5_ITAT 0x3B8
-
-#define TSI148_LCSR_IT6_ITSAU 0x3C0
-#define TSI148_LCSR_IT6_ITSAL 0x3C4
-#define TSI148_LCSR_IT6_ITEAU 0x3C8
-#define TSI148_LCSR_IT6_ITEAL 0x3CC
-#define TSI148_LCSR_IT6_ITOFU 0x3D0
-#define TSI148_LCSR_IT6_ITOFL 0x3D4
-#define TSI148_LCSR_IT6_ITAT 0x3D8
-
-#define TSI148_LCSR_IT7_ITSAU 0x3E0
-#define TSI148_LCSR_IT7_ITSAL 0x3E4
-#define TSI148_LCSR_IT7_ITEAU 0x3E8
-#define TSI148_LCSR_IT7_ITEAL 0x3EC
-#define TSI148_LCSR_IT7_ITOFU 0x3F0
-#define TSI148_LCSR_IT7_ITOFL 0x3F4
-#define TSI148_LCSR_IT7_ITAT 0x3F8
-
-
-#define TSI148_LCSR_IT0 0x300
-#define TSI148_LCSR_IT1 0x320
-#define TSI148_LCSR_IT2 0x340
-#define TSI148_LCSR_IT3 0x360
-#define TSI148_LCSR_IT4 0x380
-#define TSI148_LCSR_IT5 0x3A0
-#define TSI148_LCSR_IT6 0x3C0
-#define TSI148_LCSR_IT7 0x3E0
-
-static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1,
- TSI148_LCSR_IT2, TSI148_LCSR_IT3,
- TSI148_LCSR_IT4, TSI148_LCSR_IT5,
- TSI148_LCSR_IT6, TSI148_LCSR_IT7 };
-
-#define TSI148_LCSR_OFFSET_ITSAU 0x0
-#define TSI148_LCSR_OFFSET_ITSAL 0x4
-#define TSI148_LCSR_OFFSET_ITEAU 0x8
-#define TSI148_LCSR_OFFSET_ITEAL 0xC
-#define TSI148_LCSR_OFFSET_ITOFU 0x10
-#define TSI148_LCSR_OFFSET_ITOFL 0x14
-#define TSI148_LCSR_OFFSET_ITAT 0x18
-
- /*
- * Inbound Translation GCSR
- * offset 400
- */
-#define TSI148_LCSR_GBAU 0x400
-#define TSI148_LCSR_GBAL 0x404
-#define TSI148_LCSR_GCSRAT 0x408
-
- /*
- * Inbound Translation CRG
- * offset 40C
- */
-#define TSI148_LCSR_CBAU 0x40C
-#define TSI148_LCSR_CBAL 0x410
-#define TSI148_LCSR_CSRAT 0x414
-
- /*
- * Inbound Translation CR/CSR
- * CRG
- * offset 418
- */
-#define TSI148_LCSR_CROU 0x418
-#define TSI148_LCSR_CROL 0x41C
-#define TSI148_LCSR_CRAT 0x420
-
- /*
- * Inbound Translation Location Monitor
- * offset 424
- */
-#define TSI148_LCSR_LMBAU 0x424
-#define TSI148_LCSR_LMBAL 0x428
-#define TSI148_LCSR_LMAT 0x42C
-
- /*
- * VMEbus Interrupt Control.
- * offset 430
- */
-#define TSI148_LCSR_BCU 0x430
-#define TSI148_LCSR_BCL 0x434
-#define TSI148_LCSR_BPGTR 0x438
-#define TSI148_LCSR_BPCTR 0x43C
-#define TSI148_LCSR_VICR 0x440
-
- /*
- * Local Bus Interrupt Control.
- * offset 448
- */
-#define TSI148_LCSR_INTEN 0x448
-#define TSI148_LCSR_INTEO 0x44C
-#define TSI148_LCSR_INTS 0x450
-#define TSI148_LCSR_INTC 0x454
-#define TSI148_LCSR_INTM1 0x458
-#define TSI148_LCSR_INTM2 0x45C
-
- /*
- * DMA Controllers
- * offset 500
- */
-#define TSI148_LCSR_DCTL0 0x500
-#define TSI148_LCSR_DSTA0 0x504
-#define TSI148_LCSR_DCSAU0 0x508
-#define TSI148_LCSR_DCSAL0 0x50C
-#define TSI148_LCSR_DCDAU0 0x510
-#define TSI148_LCSR_DCDAL0 0x514
-#define TSI148_LCSR_DCLAU0 0x518
-#define TSI148_LCSR_DCLAL0 0x51C
-#define TSI148_LCSR_DSAU0 0x520
-#define TSI148_LCSR_DSAL0 0x524
-#define TSI148_LCSR_DDAU0 0x528
-#define TSI148_LCSR_DDAL0 0x52C
-#define TSI148_LCSR_DSAT0 0x530
-#define TSI148_LCSR_DDAT0 0x534
-#define TSI148_LCSR_DNLAU0 0x538
-#define TSI148_LCSR_DNLAL0 0x53C
-#define TSI148_LCSR_DCNT0 0x540
-#define TSI148_LCSR_DDBS0 0x544
-
-#define TSI148_LCSR_DCTL1 0x580
-#define TSI148_LCSR_DSTA1 0x584
-#define TSI148_LCSR_DCSAU1 0x588
-#define TSI148_LCSR_DCSAL1 0x58C
-#define TSI148_LCSR_DCDAU1 0x590
-#define TSI148_LCSR_DCDAL1 0x594
-#define TSI148_LCSR_DCLAU1 0x598
-#define TSI148_LCSR_DCLAL1 0x59C
-#define TSI148_LCSR_DSAU1 0x5A0
-#define TSI148_LCSR_DSAL1 0x5A4
-#define TSI148_LCSR_DDAU1 0x5A8
-#define TSI148_LCSR_DDAL1 0x5AC
-#define TSI148_LCSR_DSAT1 0x5B0
-#define TSI148_LCSR_DDAT1 0x5B4
-#define TSI148_LCSR_DNLAU1 0x5B8
-#define TSI148_LCSR_DNLAL1 0x5BC
-#define TSI148_LCSR_DCNT1 0x5C0
-#define TSI148_LCSR_DDBS1 0x5C4
-
-#define TSI148_LCSR_DMA0 0x500
-#define TSI148_LCSR_DMA1 0x580
-
-
-static const int TSI148_LCSR_DMA[TSI148_MAX_DMA] = { TSI148_LCSR_DMA0,
- TSI148_LCSR_DMA1 };
-
-#define TSI148_LCSR_OFFSET_DCTL 0x0
-#define TSI148_LCSR_OFFSET_DSTA 0x4
-#define TSI148_LCSR_OFFSET_DCSAU 0x8
-#define TSI148_LCSR_OFFSET_DCSAL 0xC
-#define TSI148_LCSR_OFFSET_DCDAU 0x10
-#define TSI148_LCSR_OFFSET_DCDAL 0x14
-#define TSI148_LCSR_OFFSET_DCLAU 0x18
-#define TSI148_LCSR_OFFSET_DCLAL 0x1C
-#define TSI148_LCSR_OFFSET_DSAU 0x20
-#define TSI148_LCSR_OFFSET_DSAL 0x24
-#define TSI148_LCSR_OFFSET_DDAU 0x28
-#define TSI148_LCSR_OFFSET_DDAL 0x2C
-#define TSI148_LCSR_OFFSET_DSAT 0x30
-#define TSI148_LCSR_OFFSET_DDAT 0x34
-#define TSI148_LCSR_OFFSET_DNLAU 0x38
-#define TSI148_LCSR_OFFSET_DNLAL 0x3C
-#define TSI148_LCSR_OFFSET_DCNT 0x40
-#define TSI148_LCSR_OFFSET_DDBS 0x44
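The channel base array plus the per-register offsets above give the access pattern used throughout the driver; a hedged one-liner for reference, mirroring the reads in tsi148_dma_busy() and tsi148_dma_list_exec() ("bridge" and "ch" are assumed to be the driver-private structure and a channel index of 0 or 1):

	u32 dsta = ioread32be(bridge->base + TSI148_LCSR_DMA[ch] +
			      TSI148_LCSR_OFFSET_DSTA);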
-
- /*
- * GCSR Register Group
- */
-
- /*
- * GCSR CRG
- * offset 00 600 - DEVI/VENI
- * offset 04 604 - CTRL/GA/REVID
- * offset 08 608 - Semaphore3/2/1/0
- * offset 0C 60C - Semaphore7/6/5/4
- */
-#define TSI148_GCSR_ID 0x600
-#define TSI148_GCSR_CSR 0x604
-#define TSI148_GCSR_SEMA0 0x608
-#define TSI148_GCSR_SEMA1 0x60C
-
- /*
- * Mail Box
- * GCSR CRG
- * offset 10 610 - Mailbox0
- */
-#define TSI148_GCSR_MBOX0 0x610
-#define TSI148_GCSR_MBOX1 0x614
-#define TSI148_GCSR_MBOX2 0x618
-#define TSI148_GCSR_MBOX3 0x61C
-
-static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
- TSI148_GCSR_MBOX1,
- TSI148_GCSR_MBOX2,
- TSI148_GCSR_MBOX3 };
-
- /*
- * CR/CSR
- */
-
- /*
- * CR/CSR CRG
- * offset 7FFF4 FF4 - CSRBCR
- * offset 7FFF8 FF8 - CSRBSR
- * offset 7FFFC FFC - CBAR
- */
-#define TSI148_CSRBCR 0xFF4
-#define TSI148_CSRBSR 0xFF8
-#define TSI148_CBAR 0xFFC
-
-
-
-
- /*
- * TSI148 Register Bit Definitions
- */
-
- /*
- * PFCS Register Set
- */
-#define TSI148_PCFS_CMMD_SERR (1<<8) /* SERR_L out pin ssys err */
-#define TSI148_PCFS_CMMD_PERR (1<<6) /* PERR_L out pin parity */
-#define TSI148_PCFS_CMMD_MSTR (1<<2) /* PCI bus master */
-#define TSI148_PCFS_CMMD_MEMSP (1<<1) /* PCI mem space access */
-#define TSI148_PCFS_CMMD_IOSP (1<<0) /* PCI I/O space enable */
-
-#define TSI148_PCFS_STAT_RCPVE (1<<15) /* Detected Parity Error */
-#define TSI148_PCFS_STAT_SIGSE (1<<14) /* Signalled System Error */
-#define TSI148_PCFS_STAT_RCVMA (1<<13) /* Received Master Abort */
-#define TSI148_PCFS_STAT_RCVTA (1<<12) /* Received Target Abort */
-#define TSI148_PCFS_STAT_SIGTA (1<<11) /* Signalled Target Abort */
-#define TSI148_PCFS_STAT_SELTIM (3<<9) /* DELSEL Timing */
-#define TSI148_PCFS_STAT_DPAR (1<<8) /* Data Parity Err Reported */
-#define TSI148_PCFS_STAT_FAST (1<<7) /* Fast back-to-back Cap */
-#define TSI148_PCFS_STAT_P66M (1<<5) /* 66 MHz Capable */
-#define TSI148_PCFS_STAT_CAPL (1<<4) /* Capab List - address $34 */
-
-/*
- * Revision ID/Class Code Registers (CRG +$008)
- */
-#define TSI148_PCFS_CLAS_M (0xFF<<24) /* Class ID */
-#define TSI148_PCFS_SUBCLAS_M (0xFF<<16) /* Sub-Class ID */
-#define TSI148_PCFS_PROGIF_M (0xFF<<8) /* Sub-Class ID */
-#define TSI148_PCFS_REVID_M (0xFF<<0) /* Rev ID */
-
-/*
- * Cache Line Size/ Master Latency Timer/ Header Type Registers (CRG + $00C)
- */
-#define TSI148_PCFS_HEAD_M (0xFF<<16) /* Master Lat Timer */
-#define TSI148_PCFS_MLAT_M (0xFF<<8) /* Master Lat Timer */
-#define TSI148_PCFS_CLSZ_M (0xFF<<0) /* Cache Line Size */
-
-/*
- * Memory Base Address Lower Reg (CRG + $010)
- */
-#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */
-#define TSI148_PCFS_MBARL_PRE (1<<3) /* Prefetch */
-#define TSI148_PCFS_MBARL_MTYPE_M (3<<1) /* Memory Type Mask */
-#define TSI148_PCFS_MBARL_IOMEM (1<<0) /* I/O Space Indicator */
-
-/*
- * Message Signaled Interrupt Capabilities Register (CRG + $040)
- */
-#define TSI148_PCFS_MSICAP_64BAC (1<<7) /* 64-bit Address Capable */
-#define TSI148_PCFS_MSICAP_MME_M (7<<4) /* Multiple Msg Enable Mask */
-#define TSI148_PCFS_MSICAP_MMC_M (7<<1) /* Multiple Msg Capable Mask */
-#define TSI148_PCFS_MSICAP_MSIEN (1<<0) /* Msg signaled INT Enable */
-
-/*
- * Message Address Lower Register (CRG +$044)
- */
-#define TSI148_PCFS_MSIAL_M (0x3FFFFFFF<<2) /* Mask */
-
-/*
- * Message Data Register (CRG + 4C)
- */
-#define TSI148_PCFS_MSIMD_M (0xFFFF<<0) /* Mask */
-
-/*
- * PCI-X Capabilities Register (CRG + $050)
- */
-#define TSI148_PCFS_PCIXCAP_MOST_M (7<<4) /* Max outstanding Split Tran */
-#define TSI148_PCFS_PCIXCAP_MMRBC_M (3<<2) /* Max Mem Read byte cnt */
-#define TSI148_PCFS_PCIXCAP_ERO (1<<1) /* Enable Relaxed Ordering */
-#define TSI148_PCFS_PCIXCAP_DPERE (1<<0) /* Data Parity Recover Enable */
-
-/*
- * PCI-X Status Register (CRG +$054)
- */
-#define TSI148_PCFS_PCIXSTAT_RSCEM (1<<29) /* Received Split Comp Error */
-#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7<<26) /* max Cumulative Read Size */
-#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans
- */
-#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3<<21) /* max mem read byte count */
-#define TSI148_PCFS_PCIXSTAT_DC (1<<20) /* Device Complexity */
-#define TSI148_PCFS_PCIXSTAT_USC (1<<19) /* Unexpected Split comp */
-#define TSI148_PCFS_PCIXSTAT_SCD (1<<18) /* Split completion discard */
-#define TSI148_PCFS_PCIXSTAT_133C (1<<17) /* 133MHz capable */
-#define TSI148_PCFS_PCIXSTAT_64D (1<<16) /* 64 bit device */
-#define TSI148_PCFS_PCIXSTAT_BN_M (0xFF<<8) /* Bus number */
-#define TSI148_PCFS_PCIXSTAT_DN_M (0x1F<<3) /* Device number */
-#define TSI148_PCFS_PCIXSTAT_FN_M (7<<0) /* Function Number */
-
-/*
- * LCSR Registers
- */
-
-/*
- * Outbound Translation Starting Address Lower
- */
-#define TSI148_LCSR_OTSAL_M (0xFFFF<<16) /* Mask */
-
-/*
- * Outbound Translation Ending Address Lower
- */
-#define TSI148_LCSR_OTEAL_M (0xFFFF<<16) /* Mask */
-
-/*
- * Outbound Translation Offset Lower
- */
-#define TSI148_LCSR_OTOFFL_M (0xFFFF<<16) /* Mask */
-
-/*
- * Outbound Translation 2eSST Broadcast Select
- */
-#define TSI148_LCSR_OTBS_M (0xFFFFF<<0) /* Mask */
-
-/*
- * Outbound Translation Attribute
- */
-#define TSI148_LCSR_OTAT_EN (1<<31) /* Window Enable */
-#define TSI148_LCSR_OTAT_MRPFD (1<<18) /* Prefetch Disable */
-
-#define TSI148_LCSR_OTAT_PFS_M (3<<16) /* Prefetch Size Mask */
-#define TSI148_LCSR_OTAT_PFS_2 (0<<16) /* 2 Cache Lines P Size */
-#define TSI148_LCSR_OTAT_PFS_4 (1<<16) /* 4 Cache Lines P Size */
-#define TSI148_LCSR_OTAT_PFS_8 (2<<16) /* 8 Cache Lines P Size */
-#define TSI148_LCSR_OTAT_PFS_16 (3<<16) /* 16 Cache Lines P Size */
-
-#define TSI148_LCSR_OTAT_2eSSTM_M (7<<11) /* 2eSST Xfer Rate Mask */
-#define TSI148_LCSR_OTAT_2eSSTM_160 (0<<11) /* 160MB/s 2eSST Xfer Rate */
-#define TSI148_LCSR_OTAT_2eSSTM_267 (1<<11) /* 267MB/s 2eSST Xfer Rate */
-#define TSI148_LCSR_OTAT_2eSSTM_320 (2<<11) /* 320MB/s 2eSST Xfer Rate */
-
-#define TSI148_LCSR_OTAT_TM_M (7<<8) /* Xfer Protocol Mask */
-#define TSI148_LCSR_OTAT_TM_SCT (0<<8) /* SCT Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_BLT (1<<8) /* BLT Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_MBLT (2<<8) /* MBLT Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_2eVME (3<<8) /* 2eVME Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_2eSST (4<<8) /* 2eSST Xfer Protocol */
-#define TSI148_LCSR_OTAT_TM_2eSSTB (5<<8) /* 2eSST Bcast Xfer Protocol */
-
-#define TSI148_LCSR_OTAT_DBW_M (3<<6) /* Max Data Width */
-#define TSI148_LCSR_OTAT_DBW_16 (0<<6) /* 16-bit Data Width */
-#define TSI148_LCSR_OTAT_DBW_32 (1<<6) /* 32-bit Data Width */
-
-#define TSI148_LCSR_OTAT_SUP (1<<5) /* Supervisory Access */
-#define TSI148_LCSR_OTAT_PGM (1<<4) /* Program Access */
-
-#define TSI148_LCSR_OTAT_AMODE_M (0xf<<0) /* Address Mode Mask */
-#define TSI148_LCSR_OTAT_AMODE_A16 (0<<0) /* A16 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_A24 (1<<0) /* A24 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_A32 (2<<0) /* A32 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_A64 (4<<0) /* A32 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_CRCSR (5<<0) /* CR/CSR Address Space */
-#define TSI148_LCSR_OTAT_AMODE_USER1 (8<<0) /* User1 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_USER2 (9<<0) /* User2 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_USER3 (10<<0) /* User3 Address Space */
-#define TSI148_LCSR_OTAT_AMODE_USER4 (11<<0) /* User4 Address Space */
-
-/*
- * VME Master Control Register CRG+$234
- */
-#define TSI148_LCSR_VMCTRL_VSA (1<<27) /* VMEbus Stop Ack */
-#define TSI148_LCSR_VMCTRL_VS (1<<26) /* VMEbus Stop */
-#define TSI148_LCSR_VMCTRL_DHB (1<<25) /* Device Has Bus */
-#define TSI148_LCSR_VMCTRL_DWB (1<<24) /* Device Wants Bus */
-
-#define TSI148_LCSR_VMCTRL_RMWEN (1<<20) /* RMW Enable */
-
-#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask
- */
-#define TSI148_LCSR_VMCTRL_ATO_32 (0<<16) /* 32 us */
-#define TSI148_LCSR_VMCTRL_ATO_128 (1<<16) /* 128 us */
-#define TSI148_LCSR_VMCTRL_ATO_512 (2<<16) /* 512 us */
-#define TSI148_LCSR_VMCTRL_ATO_2M (3<<16) /* 2 ms */
-#define TSI148_LCSR_VMCTRL_ATO_8M (4<<16) /* 8 ms */
-#define TSI148_LCSR_VMCTRL_ATO_32M (5<<16) /* 32 ms */
-#define TSI148_LCSR_VMCTRL_ATO_128M (6<<16) /* 128 ms */
-#define TSI148_LCSR_VMCTRL_ATO_DIS (7<<16) /* Disabled */
-
-#define TSI148_LCSR_VMCTRL_VTOFF_M (7<<12) /* VMEbus Master Time off */
-#define TSI148_LCSR_VMCTRL_VTOFF_0 (0<<12) /* 0us */
-#define TSI148_LCSR_VMCTRL_VTOFF_1 (1<<12) /* 1us */
-#define TSI148_LCSR_VMCTRL_VTOFF_2 (2<<12) /* 2us */
-#define TSI148_LCSR_VMCTRL_VTOFF_4 (3<<12) /* 4us */
-#define TSI148_LCSR_VMCTRL_VTOFF_8 (4<<12) /* 8us */
-#define TSI148_LCSR_VMCTRL_VTOFF_16 (5<<12) /* 16us */
-#define TSI148_LCSR_VMCTRL_VTOFF_32 (6<<12) /* 32us */
-#define TSI148_LCSR_VMCTRL_VTOFF_64 (7<<12) /* 64us */
-
-#define TSI148_LCSR_VMCTRL_VTON_M (7<<8) /* VMEbus Master Time On */
-#define TSI148_LCSR_VMCTRL_VTON_4 (0<<8) /* 8us */
-#define TSI148_LCSR_VMCTRL_VTON_8 (1<<8) /* 8us */
-#define TSI148_LCSR_VMCTRL_VTON_16 (2<<8) /* 16us */
-#define TSI148_LCSR_VMCTRL_VTON_32 (3<<8) /* 32us */
-#define TSI148_LCSR_VMCTRL_VTON_64 (4<<8) /* 64us */
-#define TSI148_LCSR_VMCTRL_VTON_128 (5<<8) /* 128us */
-#define TSI148_LCSR_VMCTRL_VTON_256 (6<<8) /* 256us */
-#define TSI148_LCSR_VMCTRL_VTON_512 (7<<8) /* 512us */
-
-#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask
- */
-#define TSI148_LCSR_VMCTRL_VREL_T_D (0<<3) /* Time on or Done */
-#define TSI148_LCSR_VMCTRL_VREL_T_R_D (1<<3) /* Time on and REQ or Done */
-#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2<<3) /* Time on and BCLR or Done */
-#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3<<3) /* Time on or Done and REQ */
-
-#define TSI148_LCSR_VMCTRL_VFAIR (1<<2) /* VMEbus Master Fair Mode */
-#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask
- */
-
-/*
- * VMEbus Control Register CRG+$238
- */
-#define TSI148_LCSR_VCTRL_LRE (1<<31) /* Late Retry Enable */
-
-#define TSI148_LCSR_VCTRL_DLT_M (0xF<<24) /* Deadlock Timer */
-#define TSI148_LCSR_VCTRL_DLT_OFF (0<<24) /* Deadlock Timer Off */
-#define TSI148_LCSR_VCTRL_DLT_16 (1<<24) /* 16 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_32 (2<<24) /* 32 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_64 (3<<24) /* 64 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_128 (4<<24) /* 128 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_256 (5<<24) /* 256 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_512 (6<<24) /* 512 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_1024 (7<<24) /* 1024 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_2048 (8<<24) /* 2048 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_4096 (9<<24) /* 4096 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_8192 (0xA<<24) /* 8192 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_16384 (0xB<<24) /* 16384 VCLKS */
-#define TSI148_LCSR_VCTRL_DLT_32768 (0xC<<24) /* 32768 VCLKS */
-
-#define TSI148_LCSR_VCTRL_NERBB (1<<20) /* No Early Release of Bus Busy
- */
-
-#define TSI148_LCSR_VCTRL_SRESET (1<<17) /* System Reset */
-#define TSI148_LCSR_VCTRL_LRESET (1<<16) /* Local Reset */
-
-#define TSI148_LCSR_VCTRL_SFAILAI (1<<15) /* SYSFAIL Auto Slot ID */
-#define TSI148_LCSR_VCTRL_BID_M (0x1F<<8) /* Broadcast ID Mask */
-
-#define TSI148_LCSR_VCTRL_ATOEN (1<<7) /* Arbiter Time-out Enable */
-#define TSI148_LCSR_VCTRL_ROBIN (1<<6) /* VMEbus Round Robin */
-
-#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask
- */
-#define TSI148_LCSR_VCTRL_GTO_8 (0<<0) /* 8 us */
-#define TSI148_LCSR_VCTRL_GTO_16 (1<<0) /* 16 us */
-#define TSI148_LCSR_VCTRL_GTO_32 (2<<0) /* 32 us */
-#define TSI148_LCSR_VCTRL_GTO_64 (3<<0) /* 64 us */
-#define TSI148_LCSR_VCTRL_GTO_128 (4<<0) /* 128 us */
-#define TSI148_LCSR_VCTRL_GTO_256 (5<<0) /* 256 us */
-#define TSI148_LCSR_VCTRL_GTO_512 (6<<0) /* 512 us */
-#define TSI148_LCSR_VCTRL_GTO_DIS (7<<0) /* Disabled */
-
-/*
- * VMEbus Status Register CRG + $23C
- */
-#define TSI148_LCSR_VSTAT_CPURST (1<<15) /* Clear power up reset */
-#define TSI148_LCSR_VSTAT_BRDFL (1<<14) /* Board fail */
-#define TSI148_LCSR_VSTAT_PURSTS (1<<12) /* Power up reset status */
-#define TSI148_LCSR_VSTAT_BDFAILS (1<<11) /* Board Fail Status */
-#define TSI148_LCSR_VSTAT_SYSFAILS (1<<10) /* System Fail Status */
-#define TSI148_LCSR_VSTAT_ACFAILS (1<<9) /* AC fail status */
-#define TSI148_LCSR_VSTAT_SCONS (1<<8) /* System Cont Status */
-#define TSI148_LCSR_VSTAT_GAP (1<<5) /* Geographic Addr Parity */
-#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */
-
-/*
- * PCI Configuration Status Register CRG+$240
- */
-#define TSI148_LCSR_PSTAT_REQ64S (1<<6) /* Request 64 status set */
-#define TSI148_LCSR_PSTAT_M66ENS (1<<5) /* M66ENS 66MHz enable */
-#define TSI148_LCSR_PSTAT_FRAMES (1<<4) /* Frame Status */
-#define TSI148_LCSR_PSTAT_IRDYS (1<<3) /* IRDY status */
-#define TSI148_LCSR_PSTAT_DEVSELS (1<<2) /* DEVSEL status */
-#define TSI148_LCSR_PSTAT_STOPS (1<<1) /* STOP status */
-#define TSI148_LCSR_PSTAT_TRDYS (1<<0) /* TRDY status */
-
-/*
- * VMEbus Exception Attributes Register CRG + $268
- */
-#define TSI148_LCSR_VEAT_VES (1<<31) /* Status */
-#define TSI148_LCSR_VEAT_VEOF (1<<30) /* Overflow */
-#define TSI148_LCSR_VEAT_VESCL (1<<29) /* Status Clear */
-#define TSI148_LCSR_VEAT_2EOT (1<<21) /* 2e Odd Termination */
-#define TSI148_LCSR_VEAT_2EST (1<<20) /* 2e Slave terminated */
-#define TSI148_LCSR_VEAT_BERR (1<<19) /* Bus Error */
-#define TSI148_LCSR_VEAT_LWORD (1<<18) /* LWORD_ signal state */
-#define TSI148_LCSR_VEAT_WRITE (1<<17) /* WRITE_ signal state */
-#define TSI148_LCSR_VEAT_IACK (1<<16) /* IACK_ signal state */
-#define TSI148_LCSR_VEAT_DS1 (1<<15) /* DS1_ signal state */
-#define TSI148_LCSR_VEAT_DS0 (1<<14) /* DS0_ signal state */
-#define TSI148_LCSR_VEAT_AM_M (0x3F<<8) /* Address Mode Mask */
-#define TSI148_LCSR_VEAT_XAM_M (0xFF<<0) /* Master AMode Mask */
-
-
-/*
- * VMEbus PCI Error Diagnostics PCI/X Attributes Register CRG + $280
- */
-#define TSI148_LCSR_EDPAT_EDPCL (1<<29)
-
-/*
- * Inbound Translation Starting Address Lower
- */
-#define TSI148_LCSR_ITSAL6432_M (0xFFFF<<16) /* Mask */
-#define TSI148_LCSR_ITSAL24_M (0x00FFF<<12) /* Mask */
-#define TSI148_LCSR_ITSAL16_M (0x0000FFF<<4) /* Mask */
-
-/*
- * Inbound Translation Ending Address Lower
- */
-#define TSI148_LCSR_ITEAL6432_M (0xFFFF<<16) /* Mask */
-#define TSI148_LCSR_ITEAL24_M (0x00FFF<<12) /* Mask */
-#define TSI148_LCSR_ITEAL16_M (0x0000FFF<<4) /* Mask */
-
-/*
- * Inbound Translation Offset Lower
- */
-#define TSI148_LCSR_ITOFFL6432_M (0xFFFF<<16) /* Mask */
-#define TSI148_LCSR_ITOFFL24_M (0xFFFFF<<12) /* Mask */
-#define TSI148_LCSR_ITOFFL16_M (0xFFFFFFF<<4) /* Mask */
-
-/*
- * Inbound Translation Attribute
- */
-#define TSI148_LCSR_ITAT_EN (1<<31) /* Window Enable */
-#define TSI148_LCSR_ITAT_TH (1<<18) /* Prefetch Threshold */
-
-#define TSI148_LCSR_ITAT_VFS_M (3<<16) /* Virtual FIFO Size Mask */
-#define TSI148_LCSR_ITAT_VFS_64 (0<<16) /* 64 bytes Virtual FIFO Size */
-#define TSI148_LCSR_ITAT_VFS_128 (1<<16) /* 128 bytes Virtual FIFO Sz */
-#define TSI148_LCSR_ITAT_VFS_256 (2<<16) /* 256 bytes Virtual FIFO Sz */
-#define TSI148_LCSR_ITAT_VFS_512 (3<<16) /* 512 bytes Virtual FIFO Sz */
-
-#define TSI148_LCSR_ITAT_2eSSTM_M (7<<12) /* 2eSST Xfer Rate Mask */
-#define TSI148_LCSR_ITAT_2eSSTM_160 (0<<12) /* 160MB/s 2eSST Xfer Rate */
-#define TSI148_LCSR_ITAT_2eSSTM_267 (1<<12) /* 267MB/s 2eSST Xfer Rate */
-#define TSI148_LCSR_ITAT_2eSSTM_320 (2<<12) /* 320MB/s 2eSST Xfer Rate */
-
-#define TSI148_LCSR_ITAT_2eSSTB (1<<11) /* 2eSST Bcast Xfer Protocol */
-#define TSI148_LCSR_ITAT_2eSST (1<<10) /* 2eSST Xfer Protocol */
-#define TSI148_LCSR_ITAT_2eVME (1<<9) /* 2eVME Xfer Protocol */
-#define TSI148_LCSR_ITAT_MBLT (1<<8) /* MBLT Xfer Protocol */
-#define TSI148_LCSR_ITAT_BLT (1<<7) /* BLT Xfer Protocol */
-
-#define TSI148_LCSR_ITAT_AS_M (7<<4) /* Address Space Mask */
-#define TSI148_LCSR_ITAT_AS_A16 (0<<4) /* A16 Address Space */
-#define TSI148_LCSR_ITAT_AS_A24 (1<<4) /* A24 Address Space */
-#define TSI148_LCSR_ITAT_AS_A32 (2<<4) /* A32 Address Space */
-#define TSI148_LCSR_ITAT_AS_A64 (4<<4) /* A64 Address Space */
-
-#define TSI148_LCSR_ITAT_SUPR (1<<3) /* Supervisor Access */
-#define TSI148_LCSR_ITAT_NPRIV (1<<2) /* Non-Priv (User) Access */
-#define TSI148_LCSR_ITAT_PGM (1<<1) /* Program Access */
-#define TSI148_LCSR_ITAT_DATA (1<<0) /* Data Access */
-
-/*
- * GCSR Base Address Lower Address CRG +$404
- */
-#define TSI148_LCSR_GBAL_M (0x7FFFFFF<<5) /* Mask */
-
-/*
- * GCSR Attribute Register CRG + $408
- */
-#define TSI148_LCSR_GCSRAT_EN (1<<7) /* Enable access to GCSR */
-
-#define TSI148_LCSR_GCSRAT_AS_M (7<<4) /* Address Space Mask */
-#define TSI148_LCSR_GCSRAT_AS_A16 (0<<4) /* Address Space 16 */
-#define TSI148_LCSR_GCSRAT_AS_A24 (1<<4) /* Address Space 24 */
-#define TSI148_LCSR_GCSRAT_AS_A32 (2<<4) /* Address Space 32 */
-#define TSI148_LCSR_GCSRAT_AS_A64 (4<<4) /* Address Space 64 */
-
-#define TSI148_LCSR_GCSRAT_SUPR (1<<3) /* Supervisor set - GCSR decoder */
-#define TSI148_LCSR_GCSRAT_NPRIV (1<<2) /* Non-Privileged set - GCSR decoder */
-#define TSI148_LCSR_GCSRAT_PGM (1<<1) /* Program set - GCSR decoder */
-#define TSI148_LCSR_GCSRAT_DATA (1<<0) /* Data set - GCSR decoder */
-
-/*
- * CRG Base Address Lower Address CRG + $410
- */
-#define TSI148_LCSR_CBAL_M (0xFFFFF<<12)
-
-/*
- * CRG Attribute Register CRG + $414
- */
-#define TSI148_LCSR_CRGAT_EN (1<<7) /* Enable CRG Access */
-
-#define TSI148_LCSR_CRGAT_AS_M (7<<4) /* Address Space */
-#define TSI148_LCSR_CRGAT_AS_A16 (0<<4) /* Address Space 16 */
-#define TSI148_LCSR_CRGAT_AS_A24 (1<<4) /* Address Space 24 */
-#define TSI148_LCSR_CRGAT_AS_A32 (2<<4) /* Address Space 32 */
-#define TSI148_LCSR_CRGAT_AS_A64 (4<<4) /* Address Space 64 */
-
-#define TSI148_LCSR_CRGAT_SUPR (1<<3) /* Supervisor Access */
-#define TSI148_LCSR_CRGAT_NPRIV (1<<2) /* Non-Privileged (User) Access */
-#define TSI148_LCSR_CRGAT_PGM (1<<1) /* Program Access */
-#define TSI148_LCSR_CRGAT_DATA (1<<0) /* Data Access */
-
-/*
- * CR/CSR Offset Lower Register CRG + $41C
- */
-#define TSI148_LCSR_CROL_M (0x1FFF<<19) /* Mask */
-
-/*
- * CR/CSR Attribute register CRG + $420
- */
-#define TSI148_LCSR_CRAT_EN (1<<7) /* Enable access to CR/CSR */
-
-/*
- * Location Monitor base address lower register CRG + $428
- */
-#define TSI148_LCSR_LMBAL_M (0x7FFFFFF<<5) /* Mask */
-
-/*
- * Location Monitor Attribute Register CRG + $42C
- */
-#define TSI148_LCSR_LMAT_EN (1<<7) /* Enable Location Monitor */
-
-#define TSI148_LCSR_LMAT_AS_M (7<<4) /* Address Space MASK */
-#define TSI148_LCSR_LMAT_AS_A16 (0<<4) /* A16 */
-#define TSI148_LCSR_LMAT_AS_A24 (1<<4) /* A24 */
-#define TSI148_LCSR_LMAT_AS_A32 (2<<4) /* A32 */
-#define TSI148_LCSR_LMAT_AS_A64 (4<<4) /* A64 */
-
-#define TSI148_LCSR_LMAT_SUPR (1<<3) /* Supervisor Access */
-#define TSI148_LCSR_LMAT_NPRIV (1<<2) /* Non-Priv (User) Access */
-#define TSI148_LCSR_LMAT_PGM (1<<1) /* Program Access */
-#define TSI148_LCSR_LMAT_DATA (1<<0) /* Data Access */
-
-/*
- * Broadcast Pulse Generator Timer Register CRG + $438
- */
-#define TSI148_LCSR_BPGTR_BPGT_M (0xFFFF<<0) /* Mask */
-
-/*
- * Broadcast Programmable Clock Timer Register CRG + $43C
- */
-#define TSI148_LCSR_BPCTR_BPCT_M (0xFFFFFF<<0) /* Mask */
-
-/*
- * VMEbus Interrupt Control Register CRG + $43C
- */
-#define TSI148_LCSR_VICR_CNTS_M (3<<22) /* Cntr Source MASK */
-#define TSI148_LCSR_VICR_CNTS_DIS (1<<22) /* Cntr Disable */
-#define TSI148_LCSR_VICR_CNTS_IRQ1 (2<<22) /* IRQ1 to Cntr */
-#define TSI148_LCSR_VICR_CNTS_IRQ2 (3<<22) /* IRQ2 to Cntr */
-
-#define TSI148_LCSR_VICR_EDGIS_M (3<<20) /* Edge interrupt MASK */
-#define TSI148_LCSR_VICR_EDGIS_DIS (1<<20) /* Edge interrupt Disable */
-#define TSI148_LCSR_VICR_EDGIS_IRQ1 (2<<20) /* IRQ1 to Edge */
-#define TSI148_LCSR_VICR_EDGIS_IRQ2 (3<<20) /* IRQ2 to Edge */
-
-#define TSI148_LCSR_VICR_IRQIF_M (3<<18) /* IRQ1* Function MASK */
-#define TSI148_LCSR_VICR_IRQIF_NORM (1<<18) /* Normal */
-#define TSI148_LCSR_VICR_IRQIF_PULSE (2<<18) /* Pulse Generator */
-#define TSI148_LCSR_VICR_IRQIF_PROG (3<<18) /* Programmable Clock */
-#define TSI148_LCSR_VICR_IRQIF_1U (4<<18) /* 1us Clock */
-
-#define TSI148_LCSR_VICR_IRQ2F_M (3<<16) /* IRQ2* Function MASK */
-#define TSI148_LCSR_VICR_IRQ2F_NORM (1<<16) /* Normal */
-#define TSI148_LCSR_VICR_IRQ2F_PULSE (2<<16) /* Pulse Generator */
-#define TSI148_LCSR_VICR_IRQ2F_PROG (3<<16) /* Programmable Clock */
-#define TSI148_LCSR_VICR_IRQ2F_1U (4<<16) /* 1us Clock */
-
-#define TSI148_LCSR_VICR_BIP (1<<15) /* Broadcast Interrupt Pulse */
-
-#define TSI148_LCSR_VICR_IRQC (1<<12) /* VMEbus IRQ Clear */
-#define TSI148_LCSR_VICR_IRQS (1<<11) /* VMEbus IRQ Status */
-
-#define TSI148_LCSR_VICR_IRQL_M (7<<8) /* VMEbus SW IRQ Level Mask */
-#define TSI148_LCSR_VICR_IRQL_1 (1<<8) /* VMEbus SW IRQ Level 1 */
-#define TSI148_LCSR_VICR_IRQL_2 (2<<8) /* VMEbus SW IRQ Level 2 */
-#define TSI148_LCSR_VICR_IRQL_3 (3<<8) /* VMEbus SW IRQ Level 3 */
-#define TSI148_LCSR_VICR_IRQL_4 (4<<8) /* VMEbus SW IRQ Level 4 */
-#define TSI148_LCSR_VICR_IRQL_5 (5<<8) /* VMEbus SW IRQ Level 5 */
-#define TSI148_LCSR_VICR_IRQL_6 (6<<8) /* VMEbus SW IRQ Level 6 */
-#define TSI148_LCSR_VICR_IRQL_7 (7<<8) /* VMEbus SW IRQ Level 7 */
-
-static const int TSI148_LCSR_VICR_IRQL[8] = { 0, TSI148_LCSR_VICR_IRQL_1,
- TSI148_LCSR_VICR_IRQL_2, TSI148_LCSR_VICR_IRQL_3,
- TSI148_LCSR_VICR_IRQL_4, TSI148_LCSR_VICR_IRQL_5,
- TSI148_LCSR_VICR_IRQL_6, TSI148_LCSR_VICR_IRQL_7 };
-
-#define TSI148_LCSR_VICR_STID_M (0xFF<<0) /* Status/ID Mask */
-
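A minimal sketch of how a bridge driver would typically drive the VICR fields above when generating a software interrupt on the VMEbus; the ioremapped register base pointer (base) and the TSI148_LCSR_VICR offset, defined elsewhere in this header, are assumptions here:

    u32 tmp;

    /* Program the requested level and STATUS/ID; a non-zero level asserts the IRQ */
    tmp = ioread32be(base + TSI148_LCSR_VICR);
    tmp &= ~(TSI148_LCSR_VICR_IRQL_M | TSI148_LCSR_VICR_STID_M);
    tmp |= TSI148_LCSR_VICR_IRQL[level] | statid;
    iowrite32be(tmp, base + TSI148_LCSR_VICR);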
-/*
- * Interrupt Enable Register CRG + $440
- */
-#define TSI148_LCSR_INTEN_DMA1EN (1<<25) /* DMAC 1 */
-#define TSI148_LCSR_INTEN_DMA0EN (1<<24) /* DMAC 0 */
-#define TSI148_LCSR_INTEN_LM3EN (1<<23) /* Location Monitor 3 */
-#define TSI148_LCSR_INTEN_LM2EN (1<<22) /* Location Monitor 2 */
-#define TSI148_LCSR_INTEN_LM1EN (1<<21) /* Location Monitor 1 */
-#define TSI148_LCSR_INTEN_LM0EN (1<<20) /* Location Monitor 0 */
-#define TSI148_LCSR_INTEN_MB3EN (1<<19) /* Mail Box 3 */
-#define TSI148_LCSR_INTEN_MB2EN (1<<18) /* Mail Box 2 */
-#define TSI148_LCSR_INTEN_MB1EN (1<<17) /* Mail Box 1 */
-#define TSI148_LCSR_INTEN_MB0EN (1<<16) /* Mail Box 0 */
-#define TSI148_LCSR_INTEN_PERREN (1<<13) /* PCI/X Error */
-#define TSI148_LCSR_INTEN_VERREN (1<<12) /* VMEbus Error */
-#define TSI148_LCSR_INTEN_VIEEN (1<<11) /* VMEbus IRQ Edge */
-#define TSI148_LCSR_INTEN_IACKEN (1<<10) /* IACK */
-#define TSI148_LCSR_INTEN_SYSFLEN (1<<9) /* System Fail */
-#define TSI148_LCSR_INTEN_ACFLEN (1<<8) /* AC Fail */
-#define TSI148_LCSR_INTEN_IRQ7EN (1<<7) /* IRQ7 */
-#define TSI148_LCSR_INTEN_IRQ6EN (1<<6) /* IRQ6 */
-#define TSI148_LCSR_INTEN_IRQ5EN (1<<5) /* IRQ5 */
-#define TSI148_LCSR_INTEN_IRQ4EN (1<<4) /* IRQ4 */
-#define TSI148_LCSR_INTEN_IRQ3EN (1<<3) /* IRQ3 */
-#define TSI148_LCSR_INTEN_IRQ2EN (1<<2) /* IRQ2 */
-#define TSI148_LCSR_INTEN_IRQ1EN (1<<1) /* IRQ1 */
-
-static const int TSI148_LCSR_INTEN_LMEN[4] = { TSI148_LCSR_INTEN_LM0EN,
- TSI148_LCSR_INTEN_LM1EN,
- TSI148_LCSR_INTEN_LM2EN,
- TSI148_LCSR_INTEN_LM3EN };
-
-static const int TSI148_LCSR_INTEN_IRQEN[7] = { TSI148_LCSR_INTEN_IRQ1EN,
- TSI148_LCSR_INTEN_IRQ2EN,
- TSI148_LCSR_INTEN_IRQ3EN,
- TSI148_LCSR_INTEN_IRQ4EN,
- TSI148_LCSR_INTEN_IRQ5EN,
- TSI148_LCSR_INTEN_IRQ6EN,
- TSI148_LCSR_INTEN_IRQ7EN };
-
-/*
- * Interrupt Enable Out Register CRG + $444
- */
-#define TSI148_LCSR_INTEO_DMA1EO (1<<25) /* DMAC 1 */
-#define TSI148_LCSR_INTEO_DMA0EO (1<<24) /* DMAC 0 */
-#define TSI148_LCSR_INTEO_LM3EO (1<<23) /* Loc Monitor 3 */
-#define TSI148_LCSR_INTEO_LM2EO (1<<22) /* Loc Monitor 2 */
-#define TSI148_LCSR_INTEO_LM1EO (1<<21) /* Loc Monitor 1 */
-#define TSI148_LCSR_INTEO_LM0EO (1<<20) /* Location Monitor 0 */
-#define TSI148_LCSR_INTEO_MB3EO (1<<19) /* Mail Box 3 */
-#define TSI148_LCSR_INTEO_MB2EO (1<<18) /* Mail Box 2 */
-#define TSI148_LCSR_INTEO_MB1EO (1<<17) /* Mail Box 1 */
-#define TSI148_LCSR_INTEO_MB0EO (1<<16) /* Mail Box 0 */
-#define TSI148_LCSR_INTEO_PERREO (1<<13) /* PCI/X Error */
-#define TSI148_LCSR_INTEO_VERREO (1<<12) /* VMEbus Error */
-#define TSI148_LCSR_INTEO_VIEEO (1<<11) /* VMEbus IRQ Edge */
-#define TSI148_LCSR_INTEO_IACKEO (1<<10) /* IACK */
-#define TSI148_LCSR_INTEO_SYSFLEO (1<<9) /* System Fail */
-#define TSI148_LCSR_INTEO_ACFLEO (1<<8) /* AC Fail */
-#define TSI148_LCSR_INTEO_IRQ7EO (1<<7) /* IRQ7 */
-#define TSI148_LCSR_INTEO_IRQ6EO (1<<6) /* IRQ6 */
-#define TSI148_LCSR_INTEO_IRQ5EO (1<<5) /* IRQ5 */
-#define TSI148_LCSR_INTEO_IRQ4EO (1<<4) /* IRQ4 */
-#define TSI148_LCSR_INTEO_IRQ3EO (1<<3) /* IRQ3 */
-#define TSI148_LCSR_INTEO_IRQ2EO (1<<2) /* IRQ2 */
-#define TSI148_LCSR_INTEO_IRQ1EO (1<<1) /* IRQ1 */
-
-static const int TSI148_LCSR_INTEO_LMEO[4] = { TSI148_LCSR_INTEO_LM0EO,
- TSI148_LCSR_INTEO_LM1EO,
- TSI148_LCSR_INTEO_LM2EO,
- TSI148_LCSR_INTEO_LM3EO };
-
-static const int TSI148_LCSR_INTEO_IRQEO[7] = { TSI148_LCSR_INTEO_IRQ1EO,
- TSI148_LCSR_INTEO_IRQ2EO,
- TSI148_LCSR_INTEO_IRQ3EO,
- TSI148_LCSR_INTEO_IRQ4EO,
- TSI148_LCSR_INTEO_IRQ5EO,
- TSI148_LCSR_INTEO_IRQ6EO,
- TSI148_LCSR_INTEO_IRQ7EO };
-
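A minimal sketch of how these per-level lookup tables are typically used when the framework asks the bridge to enable a VMEbus interrupt level; the TSI148_LCSR_INTEN/INTEO register offsets and the ioremapped base pointer are assumptions, being defined elsewhere in the bridge driver:

    u32 tmp;

    /* Unmask the requested VMEbus IRQ level at the source... */
    tmp = ioread32be(base + TSI148_LCSR_INTEN);
    tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
    iowrite32be(tmp, base + TSI148_LCSR_INTEN);

    /* ...and route it out to the local PCI interrupt pin */
    tmp = ioread32be(base + TSI148_LCSR_INTEO);
    tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
    iowrite32be(tmp, base + TSI148_LCSR_INTEO);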
-/*
- * Interrupt Status Register CRG + $448
- */
-#define TSI148_LCSR_INTS_DMA1S (1<<25) /* DMA 1 */
-#define TSI148_LCSR_INTS_DMA0S (1<<24) /* DMA 0 */
-#define TSI148_LCSR_INTS_LM3S (1<<23) /* Location Monitor 3 */
-#define TSI148_LCSR_INTS_LM2S (1<<22) /* Location Monitor 2 */
-#define TSI148_LCSR_INTS_LM1S (1<<21) /* Location Monitor 1 */
-#define TSI148_LCSR_INTS_LM0S (1<<20) /* Location Monitor 0 */
-#define TSI148_LCSR_INTS_MB3S (1<<19) /* Mail Box 3 */
-#define TSI148_LCSR_INTS_MB2S (1<<18) /* Mail Box 2 */
-#define TSI148_LCSR_INTS_MB1S (1<<17) /* Mail Box 1 */
-#define TSI148_LCSR_INTS_MB0S (1<<16) /* Mail Box 0 */
-#define TSI148_LCSR_INTS_PERRS (1<<13) /* PCI/X Error */
-#define TSI148_LCSR_INTS_VERRS (1<<12) /* VMEbus Error */
-#define TSI148_LCSR_INTS_VIES (1<<11) /* VMEbus IRQ Edge */
-#define TSI148_LCSR_INTS_IACKS (1<<10) /* IACK */
-#define TSI148_LCSR_INTS_SYSFLS (1<<9) /* System Fail */
-#define TSI148_LCSR_INTS_ACFLS (1<<8) /* AC Fail */
-#define TSI148_LCSR_INTS_IRQ7S (1<<7) /* IRQ7 */
-#define TSI148_LCSR_INTS_IRQ6S (1<<6) /* IRQ6 */
-#define TSI148_LCSR_INTS_IRQ5S (1<<5) /* IRQ5 */
-#define TSI148_LCSR_INTS_IRQ4S (1<<4) /* IRQ4 */
-#define TSI148_LCSR_INTS_IRQ3S (1<<3) /* IRQ3 */
-#define TSI148_LCSR_INTS_IRQ2S (1<<2) /* IRQ2 */
-#define TSI148_LCSR_INTS_IRQ1S (1<<1) /* IRQ1 */
-
-static const int TSI148_LCSR_INTS_LMS[4] = { TSI148_LCSR_INTS_LM0S,
- TSI148_LCSR_INTS_LM1S,
- TSI148_LCSR_INTS_LM2S,
- TSI148_LCSR_INTS_LM3S };
-
-static const int TSI148_LCSR_INTS_MBS[4] = { TSI148_LCSR_INTS_MB0S,
- TSI148_LCSR_INTS_MB1S,
- TSI148_LCSR_INTS_MB2S,
- TSI148_LCSR_INTS_MB3S };
-
-/*
- * Interrupt Clear Register CRG + $44C
- */
-#define TSI148_LCSR_INTC_DMA1C (1<<25) /* DMA 1 */
-#define TSI148_LCSR_INTC_DMA0C (1<<24) /* DMA 0 */
-#define TSI148_LCSR_INTC_LM3C (1<<23) /* Location Monitor 3 */
-#define TSI148_LCSR_INTC_LM2C (1<<22) /* Location Monitor 2 */
-#define TSI148_LCSR_INTC_LM1C (1<<21) /* Location Monitor 1 */
-#define TSI148_LCSR_INTC_LM0C (1<<20) /* Location Monitor 0 */
-#define TSI148_LCSR_INTC_MB3C (1<<19) /* Mail Box 3 */
-#define TSI148_LCSR_INTC_MB2C (1<<18) /* Mail Box 2 */
-#define TSI148_LCSR_INTC_MB1C (1<<17) /* Mail Box 1 */
-#define TSI148_LCSR_INTC_MB0C (1<<16) /* Mail Box 0 */
-#define TSI148_LCSR_INTC_PERRC (1<<13) /* PCI/X Error */
-#define TSI148_LCSR_INTC_VERRC (1<<12) /* VMEbus Error */
-#define TSI148_LCSR_INTC_VIEC (1<<11) /* VMEbus IRQ Edge */
-#define TSI148_LCSR_INTC_IACKC (1<<10) /* IACK */
-#define TSI148_LCSR_INTC_SYSFLC (1<<9) /* System Fail */
-#define TSI148_LCSR_INTC_ACFLC (1<<8) /* AC Fail */
-
-static const int TSI148_LCSR_INTC_LMC[4] = { TSI148_LCSR_INTC_LM0C,
- TSI148_LCSR_INTC_LM1C,
- TSI148_LCSR_INTC_LM2C,
- TSI148_LCSR_INTC_LM3C };
-
-static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
- TSI148_LCSR_INTC_MB1C,
- TSI148_LCSR_INTC_MB2C,
- TSI148_LCSR_INTC_MB3C };
-
-/*
- * Interrupt Map Register 1 CRG + $458
- */
-#define TSI148_LCSR_INTM1_DMA1M_M (3<<18) /* DMA 1 */
-#define TSI148_LCSR_INTM1_DMA0M_M (3<<16) /* DMA 0 */
-#define TSI148_LCSR_INTM1_LM3M_M (3<<14) /* Location Monitor 3 */
-#define TSI148_LCSR_INTM1_LM2M_M (3<<12) /* Location Monitor 2 */
-#define TSI148_LCSR_INTM1_LM1M_M (3<<10) /* Location Monitor 1 */
-#define TSI148_LCSR_INTM1_LM0M_M (3<<8) /* Location Monitor 0 */
-#define TSI148_LCSR_INTM1_MB3M_M (3<<6) /* Mail Box 3 */
-#define TSI148_LCSR_INTM1_MB2M_M (3<<4) /* Mail Box 2 */
-#define TSI148_LCSR_INTM1_MB1M_M (3<<2) /* Mail Box 1 */
-#define TSI148_LCSR_INTM1_MB0M_M (3<<0) /* Mail Box 0 */
-
-/*
- * Interrupt Map Register 2 CRG + $45C
- */
-#define TSI148_LCSR_INTM2_PERRM_M (3<<26) /* PCI Bus Error */
-#define TSI148_LCSR_INTM2_VERRM_M (3<<24) /* VMEbus Error */
-#define TSI148_LCSR_INTM2_VIEM_M (3<<22) /* VMEbus IRQ Edge */
-#define TSI148_LCSR_INTM2_IACKM_M (3<<20) /* IACK */
-#define TSI148_LCSR_INTM2_SYSFLM_M (3<<18) /* System Fail */
-#define TSI148_LCSR_INTM2_ACFLM_M (3<<16) /* AC Fail */
-#define TSI148_LCSR_INTM2_IRQ7M_M (3<<14) /* IRQ7 */
-#define TSI148_LCSR_INTM2_IRQ6M_M (3<<12) /* IRQ6 */
-#define TSI148_LCSR_INTM2_IRQ5M_M (3<<10) /* IRQ5 */
-#define TSI148_LCSR_INTM2_IRQ4M_M (3<<8) /* IRQ4 */
-#define TSI148_LCSR_INTM2_IRQ3M_M (3<<6) /* IRQ3 */
-#define TSI148_LCSR_INTM2_IRQ2M_M (3<<4) /* IRQ2 */
-#define TSI148_LCSR_INTM2_IRQ1M_M (3<<2) /* IRQ1 */
-
-/*
- * DMA Control (0-1) Registers CRG + $500
- */
-#define TSI148_LCSR_DCTL_ABT (1<<27) /* Abort */
-#define TSI148_LCSR_DCTL_PAU (1<<26) /* Pause */
-#define TSI148_LCSR_DCTL_DGO (1<<25) /* DMA Go */
-
-#define TSI148_LCSR_DCTL_MOD (1<<23) /* Mode */
-
-#define TSI148_LCSR_DCTL_VBKS_M (7<<12) /* VMEbus block Size MASK */
-#define TSI148_LCSR_DCTL_VBKS_32 (0<<12) /* VMEbus block Size 32 */
-#define TSI148_LCSR_DCTL_VBKS_64 (1<<12) /* VMEbus block Size 64 */
-#define TSI148_LCSR_DCTL_VBKS_128 (2<<12) /* VMEbus block Size 128 */
-#define TSI148_LCSR_DCTL_VBKS_256 (3<<12) /* VMEbus block Size 256 */
-#define TSI148_LCSR_DCTL_VBKS_512 (4<<12) /* VMEbus block Size 512 */
-#define TSI148_LCSR_DCTL_VBKS_1024 (5<<12) /* VMEbus block Size 1024 */
-#define TSI148_LCSR_DCTL_VBKS_2048 (6<<12) /* VMEbus block Size 2048 */
-#define TSI148_LCSR_DCTL_VBKS_4096 (7<<12) /* VMEbus block Size 4096 */
-
-#define TSI148_LCSR_DCTL_VBOT_M (7<<8) /* VMEbus back-off MASK */
-#define TSI148_LCSR_DCTL_VBOT_0 (0<<8) /* VMEbus back-off 0us */
-#define TSI148_LCSR_DCTL_VBOT_1 (1<<8) /* VMEbus back-off 1us */
-#define TSI148_LCSR_DCTL_VBOT_2 (2<<8) /* VMEbus back-off 2us */
-#define TSI148_LCSR_DCTL_VBOT_4 (3<<8) /* VMEbus back-off 4us */
-#define TSI148_LCSR_DCTL_VBOT_8 (4<<8) /* VMEbus back-off 8us */
-#define TSI148_LCSR_DCTL_VBOT_16 (5<<8) /* VMEbus back-off 16us */
-#define TSI148_LCSR_DCTL_VBOT_32 (6<<8) /* VMEbus back-off 32us */
-#define TSI148_LCSR_DCTL_VBOT_64 (7<<8) /* VMEbus back-off 64us */
-
-#define TSI148_LCSR_DCTL_PBKS_M (7<<4) /* PCI block size MASK */
-#define TSI148_LCSR_DCTL_PBKS_32 (0<<4) /* PCI block size 32 bytes */
-#define TSI148_LCSR_DCTL_PBKS_64 (1<<4) /* PCI block size 64 bytes */
-#define TSI148_LCSR_DCTL_PBKS_128 (2<<4) /* PCI block size 128 bytes */
-#define TSI148_LCSR_DCTL_PBKS_256 (3<<4) /* PCI block size 256 bytes */
-#define TSI148_LCSR_DCTL_PBKS_512 (4<<4) /* PCI block size 512 bytes */
-#define TSI148_LCSR_DCTL_PBKS_1024 (5<<4) /* PCI block size 1024 bytes */
-#define TSI148_LCSR_DCTL_PBKS_2048 (6<<4) /* PCI block size 2048 bytes */
-#define TSI148_LCSR_DCTL_PBKS_4096 (7<<4) /* PCI block size 4096 bytes */
-
-#define TSI148_LCSR_DCTL_PBOT_M (7<<0) /* PCI back off MASK */
-#define TSI148_LCSR_DCTL_PBOT_0 (0<<0) /* PCI back off 0us */
-#define TSI148_LCSR_DCTL_PBOT_1 (1<<0) /* PCI back off 1us */
-#define TSI148_LCSR_DCTL_PBOT_2 (2<<0) /* PCI back off 2us */
-#define TSI148_LCSR_DCTL_PBOT_4 (3<<0) /* PCI back off 4us */
-#define TSI148_LCSR_DCTL_PBOT_8 (4<<0) /* PCI back off 8us */
-#define TSI148_LCSR_DCTL_PBOT_16 (5<<0) /* PCI back off 16us */
-#define TSI148_LCSR_DCTL_PBOT_32 (6<<0) /* PCI back off 32us */
-#define TSI148_LCSR_DCTL_PBOT_64 (7<<0) /* PCI back off 64us */
-
-/*
- * DMA Status Registers (0-1) CRG + $504
- */
-#define TSI148_LCSR_DSTA_SMA (1<<31) /* PCI Signalled Master Abt */
-#define TSI148_LCSR_DSTA_RTA (1<<30) /* PCI Received Target Abt */
-#define TSI148_LCSR_DSTA_MRC (1<<29) /* PCI Max Retry Count */
-#define TSI148_LCSR_DSTA_VBE (1<<28) /* VMEbus error */
-#define TSI148_LCSR_DSTA_ABT (1<<27) /* Abort */
-#define TSI148_LCSR_DSTA_PAU (1<<26) /* Pause */
-#define TSI148_LCSR_DSTA_DON (1<<25) /* Done */
-#define TSI148_LCSR_DSTA_BSY (1<<24) /* Busy */
-
-/*
- * DMA Current Link Address Lower (0-1)
- */
-#define TSI148_LCSR_DCLAL_M (0x3FFFFFF<<6) /* Mask */
-
-/*
- * DMA Source Attribute (0-1) Reg
- */
-#define TSI148_LCSR_DSAT_TYP_M (3<<28) /* Source Bus Type */
-#define TSI148_LCSR_DSAT_TYP_PCI (0<<28) /* PCI Bus */
-#define TSI148_LCSR_DSAT_TYP_VME (1<<28) /* VMEbus */
-#define TSI148_LCSR_DSAT_TYP_PAT (2<<28) /* Data Pattern */
-
-#define TSI148_LCSR_DSAT_PSZ (1<<25) /* Pattern Size */
-#define TSI148_LCSR_DSAT_NIN (1<<24) /* No Increment */
-
-#define TSI148_LCSR_DSAT_2eSSTM_M (3<<11) /* 2eSST Trans Rate Mask */
-#define TSI148_LCSR_DSAT_2eSSTM_160 (0<<11) /* 160 MB/s */
-#define TSI148_LCSR_DSAT_2eSSTM_267 (1<<11) /* 267 MB/s */
-#define TSI148_LCSR_DSAT_2eSSTM_320 (2<<11) /* 320 MB/s */
-
-#define TSI148_LCSR_DSAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
-#define TSI148_LCSR_DSAT_TM_SCT (0<<8) /* SCT */
-#define TSI148_LCSR_DSAT_TM_BLT (1<<8) /* BLT */
-#define TSI148_LCSR_DSAT_TM_MBLT (2<<8) /* MBLT */
-#define TSI148_LCSR_DSAT_TM_2eVME (3<<8) /* 2eVME */
-#define TSI148_LCSR_DSAT_TM_2eSST (4<<8) /* 2eSST */
-#define TSI148_LCSR_DSAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
-
-#define TSI148_LCSR_DSAT_DBW_M (3<<6) /* Max Data Width MASK */
-#define TSI148_LCSR_DSAT_DBW_16 (0<<6) /* 16 Bits */
-#define TSI148_LCSR_DSAT_DBW_32 (1<<6) /* 32 Bits */
-
-#define TSI148_LCSR_DSAT_SUP (1<<5) /* Supervisory Mode */
-#define TSI148_LCSR_DSAT_PGM (1<<4) /* Program Mode */
-
-#define TSI148_LCSR_DSAT_AMODE_M (0xf<<0) /* Address Space Mask */
-#define TSI148_LCSR_DSAT_AMODE_A16 (0<<0) /* A16 */
-#define TSI148_LCSR_DSAT_AMODE_A24 (1<<0) /* A24 */
-#define TSI148_LCSR_DSAT_AMODE_A32 (2<<0) /* A32 */
-#define TSI148_LCSR_DSAT_AMODE_A64 (4<<0) /* A64 */
-#define TSI148_LCSR_DSAT_AMODE_CRCSR (5<<0) /* CR/CSR */
-#define TSI148_LCSR_DSAT_AMODE_USER1 (8<<0) /* User1 */
-#define TSI148_LCSR_DSAT_AMODE_USER2 (9<<0) /* User2 */
-#define TSI148_LCSR_DSAT_AMODE_USER3 (0xa<<0) /* User3 */
-#define TSI148_LCSR_DSAT_AMODE_USER4 (0xb<<0) /* User4 */
-
-/*
- * DMA Destination Attribute Registers (0-1)
- */
-#define TSI148_LCSR_DDAT_TYP_PCI (0<<28) /* Destination PCI Bus */
-#define TSI148_LCSR_DDAT_TYP_VME (1<<28) /* Destination VMEbus */
-
-#define TSI148_LCSR_DDAT_2eSSTM_M (3<<11) /* 2eSST Transfer Rate Mask */
-#define TSI148_LCSR_DDAT_2eSSTM_160 (0<<11) /* 160 MB/s */
-#define TSI148_LCSR_DDAT_2eSSTM_267 (1<<11) /* 267 MB/s */
-#define TSI148_LCSR_DDAT_2eSSTM_320 (2<<11) /* 320 MB/s */
-
-#define TSI148_LCSR_DDAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
-#define TSI148_LCSR_DDAT_TM_SCT (0<<8) /* SCT */
-#define TSI148_LCSR_DDAT_TM_BLT (1<<8) /* BLT */
-#define TSI148_LCSR_DDAT_TM_MBLT (2<<8) /* MBLT */
-#define TSI148_LCSR_DDAT_TM_2eVME (3<<8) /* 2eVME */
-#define TSI148_LCSR_DDAT_TM_2eSST (4<<8) /* 2eSST */
-#define TSI148_LCSR_DDAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
-
-#define TSI148_LCSR_DDAT_DBW_M (3<<6) /* Max Data Width MASK */
-#define TSI148_LCSR_DDAT_DBW_16 (0<<6) /* 16 Bits */
-#define TSI148_LCSR_DDAT_DBW_32 (1<<6) /* 32 Bits */
-
-#define TSI148_LCSR_DDAT_SUP (1<<5) /* Supervisory/User Access */
-#define TSI148_LCSR_DDAT_PGM (1<<4) /* Program/Data Access */
-
-#define TSI148_LCSR_DDAT_AMODE_M (0xf<<0) /* Address Space Mask */
-#define TSI148_LCSR_DDAT_AMODE_A16 (0<<0) /* A16 */
-#define TSI148_LCSR_DDAT_AMODE_A24 (1<<0) /* A24 */
-#define TSI148_LCSR_DDAT_AMODE_A32 (2<<0) /* A32 */
-#define TSI148_LCSR_DDAT_AMODE_A64 (4<<0) /* A64 */
-#define TSI148_LCSR_DDAT_AMODE_CRCSR (5<<0) /* CR/CSR */
-#define TSI148_LCSR_DDAT_AMODE_USER1 (8<<0) /* User1 */
-#define TSI148_LCSR_DDAT_AMODE_USER2 (9<<0) /* User2 */
-#define TSI148_LCSR_DDAT_AMODE_USER3 (0xa<<0) /* User3 */
-#define TSI148_LCSR_DDAT_AMODE_USER4 (0xb<<0) /* User4 */
-
-/*
- * DMA Next Link Address Lower
- */
-#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF<<6) /* Address Mask */
-#define TSI148_LCSR_DNLAL_LLA (1<<0) /* Last Link Address Indicator */
-
-/*
- * DMA 2eSST Broadcast Select
- */
-#define TSI148_LCSR_DBS_M (0x1FFFFF<<0) /* Mask */
-
-/*
- * GCSR Register Group
- */
-
-/*
- * GCSR Control and Status Register CRG + $604
- */
-#define TSI148_GCSR_GCTRL_LRST (1<<15) /* Local Reset */
-#define TSI148_GCSR_GCTRL_SFAILEN (1<<14) /* System Fail enable */
-#define TSI148_GCSR_GCTRL_BDFAILS (1<<13) /* Board Fail Status */
-#define TSI148_GCSR_GCTRL_SCON (1<<12) /* System Controller */
-#define TSI148_GCSR_GCTRL_MEN (1<<11) /* Module Enable (READY) */
-
-#define TSI148_GCSR_GCTRL_LMI3S (1<<7) /* Loc Monitor 3 Int Status */
-#define TSI148_GCSR_GCTRL_LMI2S (1<<6) /* Loc Monitor 2 Int Status */
-#define TSI148_GCSR_GCTRL_LMI1S (1<<5) /* Loc Monitor 1 Int Status */
-#define TSI148_GCSR_GCTRL_LMI0S (1<<4) /* Loc Monitor 0 Int Status */
-#define TSI148_GCSR_GCTRL_MBI3S (1<<3) /* Mail box 3 Int Status */
-#define TSI148_GCSR_GCTRL_MBI2S (1<<2) /* Mail box 2 Int Status */
-#define TSI148_GCSR_GCTRL_MBI1S (1<<1) /* Mail box 1 Int Status */
-#define TSI148_GCSR_GCTRL_MBI0S (1<<0) /* Mail box 0 Int Status */
-
-#define TSI148_GCSR_GAP (1<<5) /* Geographic Addr Parity */
-#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */
-
-/*
- * CR/CSR Register Group
- */
-
-/*
- * CR/CSR Bit Clear Register CRG + $FF4
- */
-#define TSI148_CRCSR_CSRBCR_LRSTC (1<<7) /* Local Reset Clear */
-#define TSI148_CRCSR_CSRBCR_SFAILC (1<<6) /* System Fail Enable Clear */
-#define TSI148_CRCSR_CSRBCR_BDFAILS (1<<5) /* Board Fail Status */
-#define TSI148_CRCSR_CSRBCR_MENC (1<<4) /* Module Enable Clear */
-#define TSI148_CRCSR_CSRBCR_BERRSC (1<<3) /* Bus Error Status Clear */
-
-/*
- * CR/CSR Bit Set Register CRG+$FF8
- */
-#define TSI148_CRCSR_CSRBSR_LISTS (1<<7) /* Local Reset Set */
-#define TSI148_CRCSR_CSRBSR_SFAILS (1<<6) /* System Fail Enable Set */
-#define TSI148_CRCSR_CSRBSR_BDFAILS (1<<5) /* Board Fail Status Set */
-#define TSI148_CRCSR_CSRBSR_MENS (1<<4) /* Module Enable Set */
-#define TSI148_CRCSR_CSRBSR_BERRS (1<<3) /* Bus Error Status Set */
-
-/*
- * CR/CSR Base Address Register CRG + FFC
- */
-#define TSI148_CRCSR_CBAR_M (0x1F<<3) /* Mask */
-
-#endif /* TSI148_H */
config VME_USER
tristate "VME user space access driver"
+ depends on STAGING
help
If you say Y here you want to be able to access a limited number of
VME windows in a manner at least semi-compatible with the interface
config VME_PIO2
tristate "GE PIO2 VME"
- depends on GPIOLIB
+ depends on STAGING && GPIOLIB
help
Say Y here to include support for the GE PIO2. The PIO2 is a 6U VME
slave card, implementing 32 solid-state relay switched IO lines, in
#include <linux/device.h>
#include <linux/types.h>
#include <linux/gpio.h>
+#include <linux/vme.h>
-#include "../vme.h"
#include "vme_pio2.h"
static int pio2_cntr_irq_set(struct pio2_card *card, int id)
#include <linux/ctype.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/vme.h>
-#include "../vme.h"
#include "vme_pio2.h"
#include <linux/ctype.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/vme.h>
-#include "../vme.h"
#include "vme_pio2.h"
static const char driver_name[] = "pio2_gpio";
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/vme.h>
-#include "../vme.h"
#include "vme_user.h"
static DEFINE_MUTEX(vme_user_mutex);
+++ /dev/null
-/*
- * VME Bridge Framework
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * Based on work by Tom Armistead and Ajit Prem
- * Copyright 2004 Motorola Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/syscalls.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-
-#include "vme.h"
-#include "vme_bridge.h"
-
-/* Bitmask and list of registered buses both protected by common mutex */
-static unsigned int vme_bus_numbers;
-static LIST_HEAD(vme_bus_list);
-static DEFINE_MUTEX(vme_buses_lock);
-
-static void __exit vme_exit(void);
-static int __init vme_init(void);
-
-static struct vme_dev *dev_to_vme_dev(struct device *dev)
-{
- return container_of(dev, struct vme_dev, dev);
-}
-
-/*
- * Find the bridge that the resource is associated with.
- */
-static struct vme_bridge *find_bridge(struct vme_resource *resource)
-{
- /* Get list to search */
- switch (resource->type) {
- case VME_MASTER:
- return list_entry(resource->entry, struct vme_master_resource,
- list)->parent;
- break;
- case VME_SLAVE:
- return list_entry(resource->entry, struct vme_slave_resource,
- list)->parent;
- break;
- case VME_DMA:
- return list_entry(resource->entry, struct vme_dma_resource,
- list)->parent;
- break;
- case VME_LM:
- return list_entry(resource->entry, struct vme_lm_resource,
- list)->parent;
- break;
- default:
- printk(KERN_ERR "Unknown resource type\n");
- return NULL;
- break;
- }
-}
-
-/*
- * Allocate a contiguous block of memory for use by the driver. This is used to
- * create the buffers for the slave windows.
- */
-void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
- dma_addr_t *dma)
-{
- struct vme_bridge *bridge;
-
- if (resource == NULL) {
- printk(KERN_ERR "No resource\n");
- return NULL;
- }
-
- bridge = find_bridge(resource);
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find bridge\n");
- return NULL;
- }
-
- if (bridge->parent == NULL) {
- printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
- return NULL;
- }
-
- if (bridge->alloc_consistent == NULL) {
- printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
- bridge->name);
- return NULL;
- }
-
- return bridge->alloc_consistent(bridge->parent, size, dma);
-}
-EXPORT_SYMBOL(vme_alloc_consistent);
-
-/*
- * Free previously allocated contiguous block of memory.
- */
-void vme_free_consistent(struct vme_resource *resource, size_t size,
- void *vaddr, dma_addr_t dma)
-{
- struct vme_bridge *bridge;
-
- if (resource == NULL) {
- printk(KERN_ERR "No resource\n");
- return;
- }
-
- bridge = find_bridge(resource);
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find bridge\n");
- return;
- }
-
- if (bridge->parent == NULL) {
- printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
- return;
- }
-
- if (bridge->free_consistent == NULL) {
- printk(KERN_ERR "free_consistent not supported by bridge %s\n",
- bridge->name);
- return;
- }
-
- bridge->free_consistent(bridge->parent, size, vaddr, dma);
-}
-EXPORT_SYMBOL(vme_free_consistent);
-
-size_t vme_get_size(struct vme_resource *resource)
-{
- int enabled, retval;
- unsigned long long base, size;
- dma_addr_t buf_base;
- u32 aspace, cycle, dwidth;
-
- switch (resource->type) {
- case VME_MASTER:
- retval = vme_master_get(resource, &enabled, &base, &size,
- &aspace, &cycle, &dwidth);
-
- return size;
- break;
- case VME_SLAVE:
- retval = vme_slave_get(resource, &enabled, &base, &size,
- &buf_base, &aspace, &cycle);
-
- return size;
- break;
- case VME_DMA:
- return 0;
- break;
- default:
- printk(KERN_ERR "Unknown resource type\n");
- return 0;
- break;
- }
-}
-EXPORT_SYMBOL(vme_get_size);
-
-static int vme_check_window(u32 aspace, unsigned long long vme_base,
- unsigned long long size)
-{
- int retval = 0;
-
- switch (aspace) {
- case VME_A16:
- if (((vme_base + size) > VME_A16_MAX) ||
- (vme_base > VME_A16_MAX))
- retval = -EFAULT;
- break;
- case VME_A24:
- if (((vme_base + size) > VME_A24_MAX) ||
- (vme_base > VME_A24_MAX))
- retval = -EFAULT;
- break;
- case VME_A32:
- if (((vme_base + size) > VME_A32_MAX) ||
- (vme_base > VME_A32_MAX))
- retval = -EFAULT;
- break;
- case VME_A64:
- /*
- * Any value held in an unsigned long long can be used as the
- * base
- */
- break;
- case VME_CRCSR:
- if (((vme_base + size) > VME_CRCSR_MAX) ||
- (vme_base > VME_CRCSR_MAX))
- retval = -EFAULT;
- break;
- case VME_USER1:
- case VME_USER2:
- case VME_USER3:
- case VME_USER4:
- /* User Defined */
- break;
- default:
- printk(KERN_ERR "Invalid address space\n");
- retval = -EINVAL;
- break;
- }
-
- return retval;
-}
-
-/*
- * Request a slave image with specific attributes, return some unique
- * identifier.
- */
-struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
- u32 cycle)
-{
- struct vme_bridge *bridge;
- struct list_head *slave_pos = NULL;
- struct vme_slave_resource *allocated_image = NULL;
- struct vme_slave_resource *slave_image = NULL;
- struct vme_resource *resource = NULL;
-
- bridge = vdev->bridge;
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find VME bus\n");
- goto err_bus;
- }
-
- /* Loop through slave resources */
- list_for_each(slave_pos, &bridge->slave_resources) {
- slave_image = list_entry(slave_pos,
- struct vme_slave_resource, list);
-
- if (slave_image == NULL) {
- printk(KERN_ERR "Registered NULL Slave resource\n");
- continue;
- }
-
- /* Find an unlocked and compatible image */
- mutex_lock(&slave_image->mtx);
- if (((slave_image->address_attr & address) == address) &&
- ((slave_image->cycle_attr & cycle) == cycle) &&
- (slave_image->locked == 0)) {
-
- slave_image->locked = 1;
- mutex_unlock(&slave_image->mtx);
- allocated_image = slave_image;
- break;
- }
- mutex_unlock(&slave_image->mtx);
- }
-
- /* No free image */
- if (allocated_image == NULL)
- goto err_image;
-
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_WARNING "Unable to allocate resource structure\n");
- goto err_alloc;
- }
- resource->type = VME_SLAVE;
- resource->entry = &allocated_image->list;
-
- return resource;
-
-err_alloc:
- /* Unlock image */
- mutex_lock(&slave_image->mtx);
- slave_image->locked = 0;
- mutex_unlock(&slave_image->mtx);
-err_image:
-err_bus:
- return NULL;
-}
-EXPORT_SYMBOL(vme_slave_request);
-
-int vme_slave_set(struct vme_resource *resource, int enabled,
- unsigned long long vme_base, unsigned long long size,
- dma_addr_t buf_base, u32 aspace, u32 cycle)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_slave_resource *image;
- int retval;
-
- if (resource->type != VME_SLAVE) {
- printk(KERN_ERR "Not a slave resource\n");
- return -EINVAL;
- }
-
- image = list_entry(resource->entry, struct vme_slave_resource, list);
-
- if (bridge->slave_set == NULL) {
- printk(KERN_ERR "Function not supported\n");
- return -ENOSYS;
- }
-
- if (!(((image->address_attr & aspace) == aspace) &&
- ((image->cycle_attr & cycle) == cycle))) {
- printk(KERN_ERR "Invalid attributes\n");
- return -EINVAL;
- }
-
- retval = vme_check_window(aspace, vme_base, size);
- if (retval)
- return retval;
-
- return bridge->slave_set(image, enabled, vme_base, size, buf_base,
- aspace, cycle);
-}
-EXPORT_SYMBOL(vme_slave_set);
-
-int vme_slave_get(struct vme_resource *resource, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_slave_resource *image;
-
- if (resource->type != VME_SLAVE) {
- printk(KERN_ERR "Not a slave resource\n");
- return -EINVAL;
- }
-
- image = list_entry(resource->entry, struct vme_slave_resource, list);
-
- if (bridge->slave_get == NULL) {
- printk(KERN_ERR "vme_slave_get not supported\n");
- return -EINVAL;
- }
-
- return bridge->slave_get(image, enabled, vme_base, size, buf_base,
- aspace, cycle);
-}
-EXPORT_SYMBOL(vme_slave_get);
-
-void vme_slave_free(struct vme_resource *resource)
-{
- struct vme_slave_resource *slave_image;
-
- if (resource->type != VME_SLAVE) {
- printk(KERN_ERR "Not a slave resource\n");
- return;
- }
-
- slave_image = list_entry(resource->entry, struct vme_slave_resource,
- list);
- if (slave_image == NULL) {
- printk(KERN_ERR "Can't find slave resource\n");
- return;
- }
-
- /* Unlock image */
- mutex_lock(&slave_image->mtx);
- if (slave_image->locked == 0)
- printk(KERN_ERR "Image is already free\n");
-
- slave_image->locked = 0;
- mutex_unlock(&slave_image->mtx);
-
- /* Free up resource memory */
- kfree(resource);
-}
-EXPORT_SYMBOL(vme_slave_free);
-
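Taken together, the slave window calls above give a device driver a pattern roughly like the following sketch; the struct vme_dev pointer vdev comes from the driver's probe routine and the VME_A24/VME_SCT/VME_USER/VME_DATA attribute flags are defined elsewhere in vme.h, so treat the exact values as assumptions:

    struct vme_resource *res;
    dma_addr_t buf_base;
    void *buf;
    int retval;

    /* Grab any free slave window that can decode A24 single cycles */
    res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
    if (res == NULL)
        return -ENODEV;

    /* Back the window with a 64KB coherent buffer from the bridge */
    buf = vme_alloc_consistent(res, 0x10000, &buf_base);
    if (buf == NULL) {
        vme_slave_free(res);
        return -ENOMEM;
    }

    /* Map VME A24 addresses 0x400000-0x40FFFF onto the buffer and enable */
    retval = vme_slave_set(res, 1, 0x400000, 0x10000, buf_base,
                   VME_A24, VME_SCT | VME_USER | VME_DATA);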
-/*
- * Request a master image with specific attributes, return some unique
- * identifier.
- */
-struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
- u32 cycle, u32 dwidth)
-{
- struct vme_bridge *bridge;
- struct list_head *master_pos = NULL;
- struct vme_master_resource *allocated_image = NULL;
- struct vme_master_resource *master_image = NULL;
- struct vme_resource *resource = NULL;
-
- bridge = vdev->bridge;
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find VME bus\n");
- goto err_bus;
- }
-
- /* Loop through master resources */
- list_for_each(master_pos, &bridge->master_resources) {
- master_image = list_entry(master_pos,
- struct vme_master_resource, list);
-
- if (master_image == NULL) {
- printk(KERN_WARNING "Registered NULL master resource\n");
- continue;
- }
-
- /* Find an unlocked and compatible image */
- spin_lock(&master_image->lock);
- if (((master_image->address_attr & address) == address) &&
- ((master_image->cycle_attr & cycle) == cycle) &&
- ((master_image->width_attr & dwidth) == dwidth) &&
- (master_image->locked == 0)) {
-
- master_image->locked = 1;
- spin_unlock(&master_image->lock);
- allocated_image = master_image;
- break;
- }
- spin_unlock(&master_image->lock);
- }
-
- /* Check to see if we found a resource */
- if (allocated_image == NULL) {
- printk(KERN_ERR "Can't find a suitable resource\n");
- goto err_image;
- }
-
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_ERR "Unable to allocate resource structure\n");
- goto err_alloc;
- }
- resource->type = VME_MASTER;
- resource->entry = &allocated_image->list;
-
- return resource;
-
-err_alloc:
- /* Unlock image */
- spin_lock(&master_image->lock);
- master_image->locked = 0;
- spin_unlock(&master_image->lock);
-err_image:
-err_bus:
- return NULL;
-}
-EXPORT_SYMBOL(vme_master_request);
-
-int vme_master_set(struct vme_resource *resource, int enabled,
- unsigned long long vme_base, unsigned long long size, u32 aspace,
- u32 cycle, u32 dwidth)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_master_resource *image;
- int retval;
-
- if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
- return -EINVAL;
- }
-
- image = list_entry(resource->entry, struct vme_master_resource, list);
-
- if (bridge->master_set == NULL) {
- printk(KERN_WARNING "vme_master_set not supported\n");
- return -EINVAL;
- }
-
- if (!(((image->address_attr & aspace) == aspace) &&
- ((image->cycle_attr & cycle) == cycle) &&
- ((image->width_attr & dwidth) == dwidth))) {
- printk(KERN_WARNING "Invalid attributes\n");
- return -EINVAL;
- }
-
- retval = vme_check_window(aspace, vme_base, size);
- if (retval)
- return retval;
-
- return bridge->master_set(image, enabled, vme_base, size, aspace,
- cycle, dwidth);
-}
-EXPORT_SYMBOL(vme_master_set);
-
-int vme_master_get(struct vme_resource *resource, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_master_resource *image;
-
- if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
- return -EINVAL;
- }
-
- image = list_entry(resource->entry, struct vme_master_resource, list);
-
- if (bridge->master_get == NULL) {
- printk(KERN_WARNING "vme_master_get not supported\n");
- return -EINVAL;
- }
-
- return bridge->master_get(image, enabled, vme_base, size, aspace,
- cycle, dwidth);
-}
-EXPORT_SYMBOL(vme_master_get);
-
-/*
- * Read data out of VME space into a buffer.
- */
-ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
- loff_t offset)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_master_resource *image;
- size_t length;
-
- if (bridge->master_read == NULL) {
- printk(KERN_WARNING "Reading from resource not supported\n");
- return -EINVAL;
- }
-
- if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
- return -EINVAL;
- }
-
- image = list_entry(resource->entry, struct vme_master_resource, list);
-
- length = vme_get_size(resource);
-
- if (offset > length) {
- printk(KERN_WARNING "Invalid Offset\n");
- return -EFAULT;
- }
-
- if ((offset + count) > length)
- count = length - offset;
-
- return bridge->master_read(image, buf, count, offset);
-
-}
-EXPORT_SYMBOL(vme_master_read);
-
-/*
- * Write data out to VME space from a buffer.
- */
-ssize_t vme_master_write(struct vme_resource *resource, void *buf,
- size_t count, loff_t offset)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_master_resource *image;
- size_t length;
-
- if (bridge->master_write == NULL) {
- printk(KERN_WARNING "Writing to resource not supported\n");
- return -EINVAL;
- }
-
- if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
- return -EINVAL;
- }
-
- image = list_entry(resource->entry, struct vme_master_resource, list);
-
- length = vme_get_size(resource);
-
- if (offset > length) {
- printk(KERN_WARNING "Invalid Offset\n");
- return -EFAULT;
- }
-
- if ((offset + count) > length)
- count = length - offset;
-
- return bridge->master_write(image, buf, count, offset);
-}
-EXPORT_SYMBOL(vme_master_write);
-
-/*
- * Perform RMW cycle to provided location.
- */
-unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
- unsigned int compare, unsigned int swap, loff_t offset)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_master_resource *image;
-
- if (bridge->master_rmw == NULL) {
- printk(KERN_WARNING "RMW transactions not supported\n");
- return -EINVAL;
- }
-
- if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
- return -EINVAL;
- }
-
- image = list_entry(resource->entry, struct vme_master_resource, list);
-
- return bridge->master_rmw(image, mask, compare, swap, offset);
-}
-EXPORT_SYMBOL(vme_master_rmw);
-
-void vme_master_free(struct vme_resource *resource)
-{
- struct vme_master_resource *master_image;
-
- if (resource->type != VME_MASTER) {
- printk(KERN_ERR "Not a master resource\n");
- return;
- }
-
- master_image = list_entry(resource->entry, struct vme_master_resource,
- list);
- if (master_image == NULL) {
- printk(KERN_ERR "Can't find master resource\n");
- return;
- }
-
- /* Unlock image */
- spin_lock(&master_image->lock);
- if (master_image->locked == 0)
- printk(KERN_ERR "Image is already free\n");
-
- master_image->locked = 0;
- spin_unlock(&master_image->lock);
-
- /* Free up resource memory */
- kfree(resource);
-}
-EXPORT_SYMBOL(vme_master_free);
-
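A corresponding sketch for the master window calls, again assuming a struct vme_dev pointer vdev and the VME_A32/VME_SCT/VME_USER/VME_DATA/VME_D32 flags from vme.h; error handling is trimmed:

    struct vme_resource *res;
    ssize_t count;
    u32 reg;
    int retval;

    /* Request a window able to perform D32 single cycles into A32 space */
    res = vme_master_request(vdev, VME_A32,
                 VME_SCT | VME_USER | VME_DATA, VME_D32);
    if (res == NULL)
        return -ENODEV;

    /* Point 1MB of the window at VME A32 address 0x80000000 and enable it */
    retval = vme_master_set(res, 1, 0x80000000, 0x100000, VME_A32,
                VME_SCT | VME_USER | VME_DATA, VME_D32);

    /* Read a 32-bit register at offset 0x40 within the window */
    if (retval == 0)
        count = vme_master_read(res, &reg, 4, 0x40);

    vme_master_free(res);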
-/*
- * Request a DMA controller with specific attributes, return some unique
- * identifier.
- */
-struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
-{
- struct vme_bridge *bridge;
- struct list_head *dma_pos = NULL;
- struct vme_dma_resource *allocated_ctrlr = NULL;
- struct vme_dma_resource *dma_ctrlr = NULL;
- struct vme_resource *resource = NULL;
-
- /* XXX Not checking resource attributes */
- printk(KERN_ERR "No VME resource Attribute tests done\n");
-
- bridge = vdev->bridge;
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find VME bus\n");
- goto err_bus;
- }
-
- /* Loop through DMA resources */
- list_for_each(dma_pos, &bridge->dma_resources) {
- dma_ctrlr = list_entry(dma_pos,
- struct vme_dma_resource, list);
-
- if (dma_ctrlr == NULL) {
- printk(KERN_ERR "Registered NULL DMA resource\n");
- continue;
- }
-
- /* Find an unlocked and compatible controller */
- mutex_lock(&dma_ctrlr->mtx);
- if (((dma_ctrlr->route_attr & route) == route) &&
- (dma_ctrlr->locked == 0)) {
-
- dma_ctrlr->locked = 1;
- mutex_unlock(&dma_ctrlr->mtx);
- allocated_ctrlr = dma_ctrlr;
- break;
- }
- mutex_unlock(&dma_ctrlr->mtx);
- }
-
- /* Check to see if we found a resource */
- if (allocated_ctrlr == NULL)
- goto err_ctrlr;
-
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_WARNING "Unable to allocate resource structure\n");
- goto err_alloc;
- }
- resource->type = VME_DMA;
- resource->entry = &allocated_ctrlr->list;
-
- return resource;
-
-err_alloc:
- /* Unlock image */
- mutex_lock(&dma_ctrlr->mtx);
- dma_ctrlr->locked = 0;
- mutex_unlock(&dma_ctrlr->mtx);
-err_ctrlr:
-err_bus:
- return NULL;
-}
-EXPORT_SYMBOL(vme_dma_request);
-
-/*
- * Start new list
- */
-struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
-{
- struct vme_dma_resource *ctrlr;
- struct vme_dma_list *dma_list;
-
- if (resource->type != VME_DMA) {
- printk(KERN_ERR "Not a DMA resource\n");
- return NULL;
- }
-
- ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
-
- dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
- if (dma_list == NULL) {
- printk(KERN_ERR "Unable to allocate memory for new dma list\n");
- return NULL;
- }
- INIT_LIST_HEAD(&dma_list->entries);
- dma_list->parent = ctrlr;
- mutex_init(&dma_list->mtx);
-
- return dma_list;
-}
-EXPORT_SYMBOL(vme_new_dma_list);
-
-/*
- * Create "Pattern" type attributes
- */
-struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
-{
- struct vme_dma_attr *attributes;
- struct vme_dma_pattern *pattern_attr;
-
- attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
- goto err_attr;
- }
-
- pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
- if (pattern_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
- goto err_pat;
- }
-
- attributes->type = VME_DMA_PATTERN;
- attributes->private = (void *)pattern_attr;
-
- pattern_attr->pattern = pattern;
- pattern_attr->type = type;
-
- return attributes;
-
-err_pat:
- kfree(attributes);
-err_attr:
- return NULL;
-}
-EXPORT_SYMBOL(vme_dma_pattern_attribute);
-
-/*
- * Create "PCI" type attributes
- */
-struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
-{
- struct vme_dma_attr *attributes;
- struct vme_dma_pci *pci_attr;
-
- /* XXX Run some sanity checks here */
-
- attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
- goto err_attr;
- }
-
- pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
- if (pci_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
- goto err_pci;
- }
-
- attributes->type = VME_DMA_PCI;
- attributes->private = (void *)pci_attr;
-
- pci_attr->address = address;
-
- return attributes;
-
-err_pci:
- kfree(attributes);
-err_attr:
- return NULL;
-}
-EXPORT_SYMBOL(vme_dma_pci_attribute);
-
-/*
- * Create "VME" type attributes
- */
-struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
- u32 aspace, u32 cycle, u32 dwidth)
-{
- struct vme_dma_attr *attributes;
- struct vme_dma_vme *vme_attr;
-
- attributes = kmalloc(
- sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
- goto err_attr;
- }
-
- vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
- if (vme_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
- goto err_vme;
- }
-
- attributes->type = VME_DMA_VME;
- attributes->private = (void *)vme_attr;
-
- vme_attr->address = address;
- vme_attr->aspace = aspace;
- vme_attr->cycle = cycle;
- vme_attr->dwidth = dwidth;
-
- return attributes;
-
-err_vme:
- kfree(attributes);
-err_attr:
- return NULL;
-}
-EXPORT_SYMBOL(vme_dma_vme_attribute);
-
-/*
- * Free attribute
- */
-void vme_dma_free_attribute(struct vme_dma_attr *attributes)
-{
- kfree(attributes->private);
- kfree(attributes);
-}
-EXPORT_SYMBOL(vme_dma_free_attribute);
-
-int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
- struct vme_dma_attr *dest, size_t count)
-{
- struct vme_bridge *bridge = list->parent->parent;
- int retval;
-
- if (bridge->dma_list_add == NULL) {
- printk(KERN_WARNING "Link List DMA generation not supported\n");
- return -EINVAL;
- }
-
- if (!mutex_trylock(&list->mtx)) {
- printk(KERN_ERR "Link List already submitted\n");
- return -EINVAL;
- }
-
- retval = bridge->dma_list_add(list, src, dest, count);
-
- mutex_unlock(&list->mtx);
-
- return retval;
-}
-EXPORT_SYMBOL(vme_dma_list_add);
-
-int vme_dma_list_exec(struct vme_dma_list *list)
-{
- struct vme_bridge *bridge = list->parent->parent;
- int retval;
-
- if (bridge->dma_list_exec == NULL) {
- printk(KERN_ERR "Link List DMA execution not supported\n");
- return -EINVAL;
- }
-
- mutex_lock(&list->mtx);
-
- retval = bridge->dma_list_exec(list);
-
- mutex_unlock(&list->mtx);
-
- return retval;
-}
-EXPORT_SYMBOL(vme_dma_list_exec);
-
-int vme_dma_list_free(struct vme_dma_list *list)
-{
- struct vme_bridge *bridge = list->parent->parent;
- int retval;
-
- if (bridge->dma_list_empty == NULL) {
- printk(KERN_WARNING "Emptying of Link Lists not supported\n");
- return -EINVAL;
- }
-
- if (!mutex_trylock(&list->mtx)) {
- printk(KERN_ERR "Link List in use\n");
- return -EINVAL;
- }
-
- /*
- * Empty out all of the entries from the dma list. We need to go to the
- * low level driver as dma entries are driver specific.
- */
- retval = bridge->dma_list_empty(list);
- if (retval) {
- printk(KERN_ERR "Unable to empty link-list entries\n");
- mutex_unlock(&list->mtx);
- return retval;
- }
- mutex_unlock(&list->mtx);
- kfree(list);
-
- return retval;
-}
-EXPORT_SYMBOL(vme_dma_list_free);
-
-int vme_dma_free(struct vme_resource *resource)
-{
- struct vme_dma_resource *ctrlr;
-
- if (resource->type != VME_DMA) {
- printk(KERN_ERR "Not a DMA resource\n");
- return -EINVAL;
- }
-
- ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
-
- if (!mutex_trylock(&ctrlr->mtx)) {
- printk(KERN_ERR "Resource busy, can't free\n");
- return -EBUSY;
- }
-
- if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
- printk(KERN_WARNING "Resource still processing transfers\n");
- mutex_unlock(&ctrlr->mtx);
- return -EBUSY;
- }
-
- ctrlr->locked = 0;
-
- mutex_unlock(&ctrlr->mtx);
-
- return 0;
-}
-EXPORT_SYMBOL(vme_dma_free);
-
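A minimal sketch of the intended linked-list DMA usage; the VME_DMA_VME_TO_MEM route flag and the VME_A32/VME_SCT/VME_D32 attributes come from vme.h, and buf_dma stands for a buffer address the driver has already DMA-mapped, so all of these are assumptions:

    struct vme_resource *res;
    struct vme_dma_list *list;
    struct vme_dma_attr *src, *dest;
    int retval;

    /* Ask for a controller able to move data from the VME bus to memory */
    res = vme_dma_request(vdev, VME_DMA_VME_TO_MEM);
    if (res == NULL)
        return -ENODEV;

    list = vme_new_dma_list(res);
    if (list == NULL) {
        vme_dma_free(res);
        return -ENOMEM;
    }

    /* Describe one 4KB transfer from VME A32 0x80000000 into buf_dma */
    src = vme_dma_vme_attribute(0x80000000, VME_A32, VME_SCT, VME_D32);
    dest = vme_dma_pci_attribute(buf_dma);

    retval = vme_dma_list_add(list, src, dest, 0x1000);
    if (retval == 0)
        retval = vme_dma_list_exec(list);

    vme_dma_free_attribute(src);
    vme_dma_free_attribute(dest);
    vme_dma_list_free(list);
    vme_dma_free(res);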
-void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
-{
- void (*call)(int, int, void *);
- void *priv_data;
-
- call = bridge->irq[level - 1].callback[statid].func;
- priv_data = bridge->irq[level - 1].callback[statid].priv_data;
-
- if (call != NULL)
- call(level, statid, priv_data);
- else
- printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
- level, statid);
-}
-EXPORT_SYMBOL(vme_irq_handler);
-
-int vme_irq_request(struct vme_dev *vdev, int level, int statid,
- void (*callback)(int, int, void *),
- void *priv_data)
-{
- struct vme_bridge *bridge;
-
- bridge = vdev->bridge;
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find VME bus\n");
- return -EINVAL;
- }
-
- if ((level < 1) || (level > 7)) {
- printk(KERN_ERR "Invalid interrupt level\n");
- return -EINVAL;
- }
-
- if (bridge->irq_set == NULL) {
- printk(KERN_ERR "Configuring interrupts not supported\n");
- return -EINVAL;
- }
-
- mutex_lock(&bridge->irq_mtx);
-
- if (bridge->irq[level - 1].callback[statid].func) {
- mutex_unlock(&bridge->irq_mtx);
- printk(KERN_WARNING "VME Interrupt already taken\n");
- return -EBUSY;
- }
-
- bridge->irq[level - 1].count++;
- bridge->irq[level - 1].callback[statid].priv_data = priv_data;
- bridge->irq[level - 1].callback[statid].func = callback;
-
- /* Enable IRQ level */
- bridge->irq_set(bridge, level, 1, 1);
-
- mutex_unlock(&bridge->irq_mtx);
-
- return 0;
-}
-EXPORT_SYMBOL(vme_irq_request);
-
-void vme_irq_free(struct vme_dev *vdev, int level, int statid)
-{
- struct vme_bridge *bridge;
-
- bridge = vdev->bridge;
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find VME bus\n");
- return;
- }
-
- if ((level < 1) || (level > 7)) {
- printk(KERN_ERR "Invalid interrupt level\n");
- return;
- }
-
- if (bridge->irq_set == NULL) {
- printk(KERN_ERR "Configuring interrupts not supported\n");
- return;
- }
-
- mutex_lock(&bridge->irq_mtx);
-
- bridge->irq[level - 1].count--;
-
- /* Disable IRQ level if no more interrupts attached at this level */
- if (bridge->irq[level - 1].count == 0)
- bridge->irq_set(bridge, level, 0, 1);
-
- bridge->irq[level - 1].callback[statid].func = NULL;
- bridge->irq[level - 1].callback[statid].priv_data = NULL;
-
- mutex_unlock(&bridge->irq_mtx);
-}
-EXPORT_SYMBOL(vme_irq_free);
-
-int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
-{
- struct vme_bridge *bridge;
-
- bridge = vdev->bridge;
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find VME bus\n");
- return -EINVAL;
- }
-
- if ((level < 1) || (level > 7)) {
- printk(KERN_WARNING "Invalid interrupt level\n");
- return -EINVAL;
- }
-
- if (bridge->irq_generate == NULL) {
- printk(KERN_WARNING "Interrupt generation not supported\n");
- return -EINVAL;
- }
-
- return bridge->irq_generate(bridge, level, statid);
-}
-EXPORT_SYMBOL(vme_irq_generate);
-
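A minimal sketch of how a device driver is expected to hook a VMEbus interrupt through the calls above; example_isr and struct example_card are purely illustrative names:

    static void example_isr(int level, int statid, void *priv_data)
    {
        struct example_card *card = priv_data;

        /* Called from the bridge's interrupt handler for this level/vector */
        schedule_work(&card->work);
    }

    /* In probe: claim VMEbus level 3, STATUS/ID (vector) 0x20 */
    retval = vme_irq_request(vdev, 3, 0x20, example_isr, card);

    /* In remove: release the vector again */
    vme_irq_free(vdev, 3, 0x20);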
-/*
- * Request the location monitor, return resource or NULL
- */
-struct vme_resource *vme_lm_request(struct vme_dev *vdev)
-{
- struct vme_bridge *bridge;
- struct list_head *lm_pos = NULL;
- struct vme_lm_resource *allocated_lm = NULL;
- struct vme_lm_resource *lm = NULL;
- struct vme_resource *resource = NULL;
-
- bridge = vdev->bridge;
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find VME bus\n");
- goto err_bus;
- }
-
- /* Loop through DMA resources */
- list_for_each(lm_pos, &bridge->lm_resources) {
- lm = list_entry(lm_pos,
- struct vme_lm_resource, list);
-
- if (lm == NULL) {
- printk(KERN_ERR "Registered NULL Location Monitor resource\n");
- continue;
- }
-
- /* Find an unlocked controller */
- mutex_lock(&lm->mtx);
- if (lm->locked == 0) {
- lm->locked = 1;
- mutex_unlock(&lm->mtx);
- allocated_lm = lm;
- break;
- }
- mutex_unlock(&lm->mtx);
- }
-
- /* Check to see if we found a resource */
- if (allocated_lm == NULL)
- goto err_lm;
-
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_ERR "Unable to allocate resource structure\n");
- goto err_alloc;
- }
- resource->type = VME_LM;
- resource->entry = &allocated_lm->list;
-
- return resource;
-
-err_alloc:
- /* Unlock image */
- mutex_lock(&lm->mtx);
- lm->locked = 0;
- mutex_unlock(&lm->mtx);
-err_lm:
-err_bus:
- return NULL;
-}
-EXPORT_SYMBOL(vme_lm_request);
-
-int vme_lm_count(struct vme_resource *resource)
-{
- struct vme_lm_resource *lm;
-
- if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
- return -EINVAL;
- }
-
- lm = list_entry(resource->entry, struct vme_lm_resource, list);
-
- return lm->monitors;
-}
-EXPORT_SYMBOL(vme_lm_count);
-
-int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
- u32 aspace, u32 cycle)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_lm_resource *lm;
-
- if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
- return -EINVAL;
- }
-
- lm = list_entry(resource->entry, struct vme_lm_resource, list);
-
- if (bridge->lm_set == NULL) {
- printk(KERN_ERR "vme_lm_set not supported\n");
- return -EINVAL;
- }
-
- return bridge->lm_set(lm, lm_base, aspace, cycle);
-}
-EXPORT_SYMBOL(vme_lm_set);
-
-int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
- u32 *aspace, u32 *cycle)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_lm_resource *lm;
-
- if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
- return -EINVAL;
- }
-
- lm = list_entry(resource->entry, struct vme_lm_resource, list);
-
- if (bridge->lm_get == NULL) {
- printk(KERN_ERR "vme_lm_get not supported\n");
- return -EINVAL;
- }
-
- return bridge->lm_get(lm, lm_base, aspace, cycle);
-}
-EXPORT_SYMBOL(vme_lm_get);
-
-int vme_lm_attach(struct vme_resource *resource, int monitor,
- void (*callback)(int))
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_lm_resource *lm;
-
- if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
- return -EINVAL;
- }
-
- lm = list_entry(resource->entry, struct vme_lm_resource, list);
-
- if (bridge->lm_attach == NULL) {
- printk(KERN_ERR "vme_lm_attach not supported\n");
- return -EINVAL;
- }
-
- return bridge->lm_attach(lm, monitor, callback);
-}
-EXPORT_SYMBOL(vme_lm_attach);
-
-int vme_lm_detach(struct vme_resource *resource, int monitor)
-{
- struct vme_bridge *bridge = find_bridge(resource);
- struct vme_lm_resource *lm;
-
- if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
- return -EINVAL;
- }
-
- lm = list_entry(resource->entry, struct vme_lm_resource, list);
-
- if (bridge->lm_detach == NULL) {
- printk(KERN_ERR "vme_lm_detach not supported\n");
- return -EINVAL;
- }
-
- return bridge->lm_detach(lm, monitor);
-}
-EXPORT_SYMBOL(vme_lm_detach);
-
-void vme_lm_free(struct vme_resource *resource)
-{
- struct vme_lm_resource *lm;
-
- if (resource->type != VME_LM) {
- printk(KERN_ERR "Not a Location Monitor resource\n");
- return;
- }
-
- lm = list_entry(resource->entry, struct vme_lm_resource, list);
-
- mutex_lock(&lm->mtx);
-
- /* XXX
- * Check to see that there aren't any callbacks still attached, if
- * there are we should probably be detaching them!
- */
-
- lm->locked = 0;
-
- mutex_unlock(&lm->mtx);
-
- kfree(resource);
-}
-EXPORT_SYMBOL(vme_lm_free);
-
-int vme_slot_get(struct vme_dev *vdev)
-{
- struct vme_bridge *bridge;
-
- bridge = vdev->bridge;
- if (bridge == NULL) {
- printk(KERN_ERR "Can't find VME bus\n");
- return -EINVAL;
- }
-
- if (bridge->slot_get == NULL) {
- printk(KERN_WARNING "vme_slot_get not supported\n");
- return -EINVAL;
- }
-
- return bridge->slot_get(bridge);
-}
-EXPORT_SYMBOL(vme_slot_get);
-
-
-/* - Bridge Registration --------------------------------------------------- */
-
-static void vme_dev_release(struct device *dev)
-{
- kfree(dev_to_vme_dev(dev));
-}
-
-int vme_register_bridge(struct vme_bridge *bridge)
-{
- int i;
- int ret = -1;
-
- mutex_lock(&vme_buses_lock);
- for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
- if ((vme_bus_numbers & (1 << i)) == 0) {
- vme_bus_numbers |= (1 << i);
- bridge->num = i;
- INIT_LIST_HEAD(&bridge->devices);
- list_add_tail(&bridge->bus_list, &vme_bus_list);
- ret = 0;
- break;
- }
- }
- mutex_unlock(&vme_buses_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(vme_register_bridge);
-
-void vme_unregister_bridge(struct vme_bridge *bridge)
-{
- struct vme_dev *vdev;
- struct vme_dev *tmp;
-
- mutex_lock(&vme_buses_lock);
- vme_bus_numbers &= ~(1 << bridge->num);
- list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
- list_del(&vdev->drv_list);
- list_del(&vdev->bridge_list);
- device_unregister(&vdev->dev);
- }
- list_del(&bridge->bus_list);
- mutex_unlock(&vme_buses_lock);
-}
-EXPORT_SYMBOL(vme_unregister_bridge);
-
-/* - Driver Registration --------------------------------------------------- */
-
-static int __vme_register_driver_bus(struct vme_driver *drv,
- struct vme_bridge *bridge, unsigned int ndevs)
-{
- int err;
- unsigned int i;
- struct vme_dev *vdev;
- struct vme_dev *tmp;
-
- for (i = 0; i < ndevs; i++) {
- vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
- if (!vdev) {
- err = -ENOMEM;
- goto err_devalloc;
- }
- vdev->num = i;
- vdev->bridge = bridge;
- vdev->dev.platform_data = drv;
- vdev->dev.release = vme_dev_release;
- vdev->dev.parent = bridge->parent;
- vdev->dev.bus = &vme_bus_type;
- dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
- vdev->num);
-
- err = device_register(&vdev->dev);
- if (err)
- goto err_reg;
-
- if (vdev->dev.platform_data) {
- list_add_tail(&vdev->drv_list, &drv->devices);
- list_add_tail(&vdev->bridge_list, &bridge->devices);
- } else
- device_unregister(&vdev->dev);
- }
- return 0;
-
-err_reg:
- kfree(vdev);
-err_devalloc:
- list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
- list_del(&vdev->drv_list);
- list_del(&vdev->bridge_list);
- device_unregister(&vdev->dev);
- }
- return err;
-}
-
-static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
-{
- struct vme_bridge *bridge;
- int err = 0;
-
- mutex_lock(&vme_buses_lock);
- list_for_each_entry(bridge, &vme_bus_list, bus_list) {
- /*
- * This cannot cause trouble as we already have vme_buses_lock
- * and if the bridge is removed, it will have to go through
- * vme_unregister_bridge() to do it (which calls remove() on
- * the bridge which in turn tries to acquire vme_buses_lock and
- * will have to wait).
- */
- err = __vme_register_driver_bus(drv, bridge, ndevs);
- if (err)
- break;
- }
- mutex_unlock(&vme_buses_lock);
- return err;
-}
-
-int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
-{
- int err;
-
- drv->driver.name = drv->name;
- drv->driver.bus = &vme_bus_type;
- INIT_LIST_HEAD(&drv->devices);
-
- err = driver_register(&drv->driver);
- if (err)
- return err;
-
- err = __vme_register_driver(drv, ndevs);
- if (err)
- driver_unregister(&drv->driver);
-
- return err;
-}
-EXPORT_SYMBOL(vme_register_driver);
-
-void vme_unregister_driver(struct vme_driver *drv)
-{
- struct vme_dev *dev, *dev_tmp;
-
- mutex_lock(&vme_buses_lock);
- list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
- list_del(&dev->drv_list);
- list_del(&dev->bridge_list);
- device_unregister(&dev->dev);
- }
- mutex_unlock(&vme_buses_lock);
-
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL(vme_unregister_driver);
-
-/* - Bus Registration ------------------------------------------------------ */
-
-static int vme_bus_match(struct device *dev, struct device_driver *drv)
-{
- struct vme_driver *vme_drv;
-
- vme_drv = container_of(drv, struct vme_driver, driver);
-
- if (dev->platform_data == vme_drv) {
- struct vme_dev *vdev = dev_to_vme_dev(dev);
-
- if (vme_drv->match && vme_drv->match(vdev))
- return 1;
-
- dev->platform_data = NULL;
- }
- return 0;
-}
-
-static int vme_bus_probe(struct device *dev)
-{
- int retval = -ENODEV;
- struct vme_driver *driver;
- struct vme_dev *vdev = dev_to_vme_dev(dev);
-
- driver = dev->platform_data;
-
- if (driver->probe != NULL)
- retval = driver->probe(vdev);
-
- return retval;
-}
-
-static int vme_bus_remove(struct device *dev)
-{
- int retval = -ENODEV;
- struct vme_driver *driver;
- struct vme_dev *vdev = dev_to_vme_dev(dev);
-
- driver = dev->platform_data;
-
- if (driver->remove != NULL)
- retval = driver->remove(vdev);
-
- return retval;
-}
-
-struct bus_type vme_bus_type = {
- .name = "vme",
- .match = vme_bus_match,
- .probe = vme_bus_probe,
- .remove = vme_bus_remove,
-};
-EXPORT_SYMBOL(vme_bus_type);
-
-static int __init vme_init(void)
-{
- return bus_register(&vme_bus_type);
-}
-
-static void __exit vme_exit(void)
-{
- bus_unregister(&vme_bus_type);
-}
-
-MODULE_DESCRIPTION("VME bridge driver framework");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
-MODULE_LICENSE("GPL");
-
-module_init(vme_init);
-module_exit(vme_exit);
+++ /dev/null
-#ifndef _VME_H_
-#define _VME_H_
-
-/* Resource Type */
-enum vme_resource_type {
- VME_MASTER,
- VME_SLAVE,
- VME_DMA,
- VME_LM
-};
-
-/* VME Address Spaces */
-#define VME_A16 0x1
-#define VME_A24 0x2
-#define VME_A32 0x4
-#define VME_A64 0x8
-#define VME_CRCSR 0x10
-#define VME_USER1 0x20
-#define VME_USER2 0x40
-#define VME_USER3 0x80
-#define VME_USER4 0x100
-
-#define VME_A16_MAX 0x10000ULL
-#define VME_A24_MAX 0x1000000ULL
-#define VME_A32_MAX 0x100000000ULL
-#define VME_A64_MAX 0x10000000000000000ULL
-#define VME_CRCSR_MAX 0x1000000ULL
-
-
-/* VME Cycle Types */
-#define VME_SCT 0x1
-#define VME_BLT 0x2
-#define VME_MBLT 0x4
-#define VME_2eVME 0x8
-#define VME_2eSST 0x10
-#define VME_2eSSTB 0x20
-
-#define VME_2eSST160 0x100
-#define VME_2eSST267 0x200
-#define VME_2eSST320 0x400
-
-#define VME_SUPER 0x1000
-#define VME_USER 0x2000
-#define VME_PROG 0x4000
-#define VME_DATA 0x8000
-
-/* VME Data Widths */
-#define VME_D8 0x1
-#define VME_D16 0x2
-#define VME_D32 0x4
-#define VME_D64 0x8
-
-/* Arbitration Scheduling Modes */
-#define VME_R_ROBIN_MODE 0x1
-#define VME_PRIORITY_MODE 0x2
-
-#define VME_DMA_PATTERN (1<<0)
-#define VME_DMA_PCI (1<<1)
-#define VME_DMA_VME (1<<2)
-
-#define VME_DMA_PATTERN_BYTE (1<<0)
-#define VME_DMA_PATTERN_WORD (1<<1)
-#define VME_DMA_PATTERN_INCREMENT (1<<2)
-
-#define VME_DMA_VME_TO_MEM (1<<0)
-#define VME_DMA_MEM_TO_VME (1<<1)
-#define VME_DMA_VME_TO_VME (1<<2)
-#define VME_DMA_MEM_TO_MEM (1<<3)
-#define VME_DMA_PATTERN_TO_VME (1<<4)
-#define VME_DMA_PATTERN_TO_MEM (1<<5)
-
-struct vme_dma_attr {
- u32 type;
- void *private;
-};
-
-struct vme_resource {
- enum vme_resource_type type;
- struct list_head *entry;
-};
-
-extern struct bus_type vme_bus_type;
-
-/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
-#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
-#define VME_MAX_SLOTS 32
-
-#define VME_SLOT_CURRENT -1
-#define VME_SLOT_ALL -2
-
-/**
- * Structure representing a VME device
- * @num: The device number
- * @bridge: Pointer to the bridge device this device is on
- * @dev: Internal device structure
- * @drv_list: List of devices (per driver)
- * @bridge_list: List of devices (per bridge)
- */
-struct vme_dev {
- int num;
- struct vme_bridge *bridge;
- struct device dev;
- struct list_head drv_list;
- struct list_head bridge_list;
-};
-
-struct vme_driver {
- struct list_head node;
- const char *name;
- int (*match)(struct vme_dev *);
- int (*probe)(struct vme_dev *);
- int (*remove)(struct vme_dev *);
- void (*shutdown)(void);
- struct device_driver driver;
- struct list_head devices;
-};
-
-void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
-void vme_free_consistent(struct vme_resource *, size_t, void *,
- dma_addr_t);
-
-size_t vme_get_size(struct vme_resource *);
-
-struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
-int vme_slave_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, u32, u32);
-int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, dma_addr_t *, u32 *, u32 *);
-void vme_slave_free(struct vme_resource *);
-
-struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
-int vme_master_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, u32, u32, u32);
-int vme_master_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, u32 *, u32 *, u32 *);
-ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
-ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
-unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
- unsigned int, loff_t);
-void vme_master_free(struct vme_resource *);
-
-struct vme_resource *vme_dma_request(struct vme_dev *, u32);
-struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
-struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32);
-struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
-struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
-void vme_dma_free_attribute(struct vme_dma_attr *);
-int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
- struct vme_dma_attr *, size_t);
-int vme_dma_list_exec(struct vme_dma_list *);
-int vme_dma_list_free(struct vme_dma_list *);
-int vme_dma_free(struct vme_resource *);
-
-int vme_irq_request(struct vme_dev *, int, int,
- void (*callback)(int, int, void *), void *);
-void vme_irq_free(struct vme_dev *, int, int);
-int vme_irq_generate(struct vme_dev *, int, int);
-
-struct vme_resource *vme_lm_request(struct vme_dev *);
-int vme_lm_count(struct vme_resource *);
-int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
-int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
-int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
-int vme_lm_detach(struct vme_resource *, int);
-void vme_lm_free(struct vme_resource *);
-
-int vme_slot_get(struct vme_dev *);
-
-int vme_register_driver(struct vme_driver *, unsigned int);
-void vme_unregister_driver(struct vme_driver *);
-
-
-#endif /* _VME_H_ */
-
+++ /dev/null
- VME Device Driver API
- =====================
-
-Driver registration
-===================
-
-As with other subsystems within the Linux kernel, VME device drivers register
-with the VME subsystem. This is typically done from the device driver's init
-routine via a call to the following function:
-
- int vme_register_driver (struct vme_driver *driver, unsigned int ndevs);
-
-If driver registration is successful this function returns zero; if an error
-occurs, a negative error code will be returned.
-
-A pointer to a structure of type 'vme_driver' must be provided to the
-registration function. The structure is as follows:
-
- struct vme_driver {
- struct list_head node;
- const char *name;
- int (*match)(struct vme_dev *);
- int (*probe)(struct vme_dev *);
- int (*remove)(struct vme_dev *);
- void (*shutdown)(void);
- struct device_driver driver;
- struct list_head devices;
- };
-
-At a minimum, the '.name', '.match' and '.probe' elements of this structure
-should be correctly set. The '.name' element is a pointer to a string holding
-the device driver's name.
-
-The '.match' function allows controlling the number of devices that need to
-be registered. The match function should return 1 if a device should be
-probed and 0 otherwise. This example match function (from vme_user.c) limits
-the number of devices probed to one:
-
- #define USER_BUS_MAX 1
- ...
- static int vme_user_match(struct vme_dev *vdev)
- {
- if (vdev->num >= USER_BUS_MAX)
- return 0;
- return 1;
- }
-
-The '.probe' element should contain a pointer to the probe routine. The
-probe routine is passed a 'struct vme_dev' pointer as an argument. The
-'struct vme_dev' structure looks like the following:
-
- struct vme_dev {
- int num;
- struct vme_bridge *bridge;
- struct device dev;
- struct list_head drv_list;
- struct list_head bridge_list;
- };
-
-Here, the 'num' field refers to the sequential device ID for this specific
-driver. The bridge number (or bus number) can be accessed using
-dev->bridge->num.
-
-A function is also provided to unregister the driver from the VME core and is
-usually called from the device driver's exit routine:
-
- void vme_unregister_driver (struct vme_driver *driver);
-
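-As an example, a minimal driver skeleton built on these calls might look like
-the sketch below; the 'example' naming, the device count of one and the empty
-probe/remove bodies are purely illustrative:
-
-        static int example_match(struct vme_dev *vdev)
-        {
-                /* Bind to at most one device */
-                return vdev->num == 0;
-        }
-
-        static int example_probe(struct vme_dev *vdev)
-        {
-                /* Request and configure VME resources here */
-                return 0;
-        }
-
-        static int example_remove(struct vme_dev *vdev)
-        {
-                /* Release the resources requested in probe here */
-                return 0;
-        }
-
-        static struct vme_driver example_driver = {
-                .name = "example",
-                .match = example_match,
-                .probe = example_probe,
-                .remove = example_remove,
-        };
-
-        static int __init example_init(void)
-        {
-                return vme_register_driver(&example_driver, 1);
-        }
-
-        static void __exit example_exit(void)
-        {
-                vme_unregister_driver(&example_driver);
-        }
-
-        module_init(example_init);
-        module_exit(example_exit);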
-
-Resource management
-===================
-
-Once a driver has registered with the VME core, the provided match routine will
-be called the number of times specified during registration. If a match
-succeeds, a non-zero value should be returned; a zero return value indicates
-failure. For each successful match, the probe routine of the corresponding
-driver is called. The probe routine is passed a pointer to the device's
-device structure. This pointer should be saved, as it will be required when
-requesting VME resources.
-
-The driver can request ownership of one or more master windows, slave windows
-and/or DMA channels. Rather than allowing the device driver to request a
-specific window or DMA channel (which may be used by a different driver), the
-VME core assigns a resource based on the attributes required by the driver in
-question:
-
- struct vme_resource * vme_master_request(struct vme_dev *dev,
- u32 aspace, u32 cycle, u32 width);
-
- struct vme_resource * vme_slave_request(struct vme_dev *dev, u32 aspace,
- u32 cycle);
-
- struct vme_resource *vme_dma_request(struct vme_dev *dev, u32 route);
-
-For slave windows these attributes are split into the VME address spaces that
-need to be accessed in 'aspace' and the VME bus cycle types required in
-'cycle'. Master windows add a further set of attributes in 'width' specifying
-the required data transfer widths. These attributes are defined as bitmasks, so
-any combination of the attributes can be requested for a single window. The
-core will assign a window that meets the requirements, returning a pointer of
-type vme_resource that should be used to identify the allocated resource when
-it is used. For DMA controllers, the request function requires the potential
-direction of any transfers to be provided in the route attribute. This is
-typically VME-to-MEM and/or MEM-to-VME, though some hardware can support
-VME-to-VME and MEM-to-MEM transfers as well as test pattern generation. If an
-unallocated window fitting the requirements cannot be found, a NULL pointer
-will be returned.
-
-Functions are also provided to free window allocations once they are no longer
-required. These functions should be passed the pointer to the resource provided
-during resource allocation:
-
- void vme_master_free(struct vme_resource *res);
-
- void vme_slave_free(struct vme_resource *res);
-
- void vme_dma_free(struct vme_resource *res);
-
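-As an illustration, a probe routine might request an A24/A32 master window and
-a DMA channel as in the sketch below (error handling trimmed, 'vdev' is the
-pointer passed to the probe routine):
-
-        struct vme_resource *master, *dma;
-
-        master = vme_master_request(vdev, VME_A32 | VME_A24,
-                VME_SCT | VME_USER | VME_DATA, VME_D32 | VME_D16);
-        if (master == NULL)
-                return -EBUSY;
-
-        dma = vme_dma_request(vdev, VME_DMA_VME_TO_MEM | VME_DMA_MEM_TO_VME);
-        if (dma == NULL) {
-                vme_master_free(master);
-                return -EBUSY;
-        }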
-
-Master windows
-==============
-
-Master windows provide access from the local processor[s] out onto the VME bus.
-The number of windows available and the access modes they support depend on the
-underlying chipset. A window must be configured before it can be used.
-
-
-Master window configuration
----------------------------
-
-Once a master window has been assigned the following functions can be used to
-configure it and retrieve the current settings:
-
- int vme_master_set (struct vme_resource *res, int enabled,
- unsigned long long base, unsigned long long size, u32 aspace,
- u32 cycle, u32 width);
-
- int vme_master_get (struct vme_resource *res, int *enabled,
- unsigned long long *base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *width);
-
-The address spaces, transfer widths and cycle types are the same as described
-under resource management; however, some of the options are mutually exclusive.
-For example, only one address space may be specified.
-
-These functions return 0 on success or an error code should the call fail.
-
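-For example, enabling a 64KB window onto A24 space at VME address 0x10000 with
-a 16-bit data width could look like this sketch, where 'master' is the resource
-returned by vme_master_request() (values purely illustrative):
-
-        retval = vme_master_set(master, 1, 0x10000, 0x10000, VME_A24,
-                VME_SCT | VME_USER | VME_DATA, VME_D16);
-        if (retval)
-                return retval;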
-
-Master window access
---------------------
-
-The following functions can be used to read from and write to configured master
-windows. These functions return the number of bytes copied:
-
- ssize_t vme_master_read(struct vme_resource *res, void *buf,
- size_t count, loff_t offset);
-
- ssize_t vme_master_write(struct vme_resource *res, void *buf,
- size_t count, loff_t offset);
-
-In addition to simple reads and writes, a function is provided to perform a
-read-modify-write transaction. This function returns the original value of the
-VME bus location:
-
- unsigned int vme_master_rmw (struct vme_resource *res,
- unsigned int mask, unsigned int compare, unsigned int swap,
- loff_t offset);
-
-This function reads the value at the given offset and applies the mask. If the
-bits selected by the mask match the values of the corresponding bits in the
-compare field, the value of swap is written to the specified offset.
-
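-For instance, reading a 32-bit value at offset 0x100 of a configured window and
-writing it back incremented might look like the following sketch:
-
-        u32 val;
-        ssize_t count;
-
-        count = vme_master_read(master, &val, sizeof(val), 0x100);
-        if (count != sizeof(val))
-                return -EIO;
-
-        val++;
-        count = vme_master_write(master, &val, sizeof(val), 0x100);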
-
-Slave windows
-=============
-
-Slave windows provide devices on the VME bus access into mapped portions of the
-local memory. The number of windows available and the access modes they support
-depend on the underlying chipset. A window must be configured before it can be
-used.
-
-
-Slave window configuration
---------------------------
-
-Once a slave window has been assigned the following functions can be used to
-configure it and retrieve the current settings:
-
- int vme_slave_set (struct vme_resource *res, int enabled,
- unsigned long long base, unsigned long long size,
- dma_addr_t mem, u32 aspace, u32 cycle);
-
- int vme_slave_get (struct vme_resource *res, int *enabled,
- unsigned long long *base, unsigned long long *size,
- dma_addr_t *mem, u32 *aspace, u32 *cycle);
-
-The address spaces, transfer widths and cycle types are the same as described
-under resource management; however, some of the options are mutually exclusive.
-For example, only one address space may be specified.
-
-These functions return 0 on success or an error code should the call fail.
-
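-By way of example, exposing a previously allocated buffer at bus address 'mem'
-(see the next section) through a 64KB A24 slave window might be sketched as
-follows, where 'slave' is the resource returned by vme_slave_request():
-
-        retval = vme_slave_set(slave, 1, 0x10000, 0x10000, mem,
-                VME_A24, VME_SCT | VME_USER | VME_DATA);
-        if (retval)
-                return retval;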
-
-Slave window buffer allocation
-------------------------------
-
-Functions are provided to allow the user to allocate and free contiguous
-buffers which will be accessible by the VME bridge. These functions do not have
-to be used; other methods can be used to allocate a buffer, though care must be
-taken to ensure that it is contiguous and accessible by the VME bridge:
-
- void * vme_alloc_consistent(struct vme_resource *res, size_t size,
- dma_addr_t *mem);
-
- void vme_free_consistent(struct vme_resource *res, size_t size,
- void *virt, dma_addr_t mem);
-
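-A typical sequence, shown here only as a sketch, allocates the buffer before
-enabling the slave window and frees it again after the window is disabled:
-
-        dma_addr_t mem;
-        void *virt;
-
-        virt = vme_alloc_consistent(slave, 0x10000, &mem);
-        if (virt == NULL)
-                return -ENOMEM;
-
-        /* ... pass 'mem' to vme_slave_set() as shown above ... */
-
-        vme_free_consistent(slave, 0x10000, virt, mem);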
-
-Slave window access
--------------------
-
-Slave windows map local memory onto the VME bus; the standard methods for
-accessing memory should be used.
-
-
-DMA channels
-============
-
-The VME DMA framework provides the ability to run linked-list DMA transfers.
-The API introduces the concept of DMA lists. Each DMA list is a linked list
-which can be passed to a DMA controller. Multiple lists can be created,
-extended, executed, reused and destroyed.
-
-
-List Management
----------------
-
-The following functions are provided to create and destroy DMA lists. Execution
-of a list will not automatically destroy the list, thus enabling a list to be
-reused for repetitive tasks:
-
- struct vme_dma_list *vme_new_dma_list(struct vme_resource *res);
-
- int vme_dma_list_free(struct vme_dma_list *list);
-
-
-List Population
----------------
-
-An item can be added to a list using the following function (the source and
-destination attributes need to be created before calling this function; this is
-covered under "Transfer Attributes"):
-
- int vme_dma_list_add(struct vme_dma_list *list,
- struct vme_dma_attr *src, struct vme_dma_attr *dest,
- size_t count);
-
-NOTE: The detailed attributes of the transfer's source and destination
- are not checked until an entry is added to a DMA list; the request
- for a DMA channel purely checks the directions in which the
- controller is expected to transfer data. As a result it is
- possible for this call to return an error, for example if the
- source or destination is in an unsupported VME address space.
-
-Transfer Attributes
--------------------
-
-The attributes for the source and destination are handled separately from adding
-an item to a list. This is due to the diverse attributes required for each type
-of source and destination. There are functions to create attributes for PCI, VME
-and pattern sources and destinations (where appropriate):
-
-Pattern source:
-
- struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
-
-PCI source or destination:
-
- struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t mem);
-
-VME source or destination:
-
- struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
- u32 aspace, u32 cycle, u32 width);
-
-The following function should be used to free an attribute:
-
- void vme_dma_free_attribute(struct vme_dma_attr *attr);
-
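-For example, attributes describing a transfer from VME A32 space into a PCI
-buffer at bus address 'buf_dma' (a hypothetical, previously mapped buffer)
-might be created and released as in this sketch (error handling omitted):
-
-        struct vme_dma_attr *src, *dest;
-
-        src = vme_dma_vme_attribute(0x10000, VME_A32, VME_SCT, VME_D32);
-        dest = vme_dma_pci_attribute(buf_dma);
-
-        /* ... add the attributes to a list, see "List Population" ... */
-
-        vme_dma_free_attribute(dest);
-        vme_dma_free_attribute(src);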
-
-List Execution
---------------
-
-The following function queues a list for execution. The function will return
-once the list has been executed:
-
- int vme_dma_list_exec(struct vme_dma_list *list);
-
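-Tying the above together, a single 4KB transfer could be queued as in the
-sketch below, where 'dma' is the requested DMA resource and 'src'/'dest' are
-attributes created as described under "Transfer Attributes":
-
-        struct vme_dma_list *list;
-
-        list = vme_new_dma_list(dma);
-        if (list == NULL)
-                return -ENOMEM;
-
-        retval = vme_dma_list_add(list, src, dest, 0x1000);
-        if (retval == 0)
-                retval = vme_dma_list_exec(list);
-
-        vme_dma_list_free(list);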
-
-Interrupts
-==========
-
-The VME API provides functions to attach and detach callbacks to specific VME
-level and status ID combinations and for the generation of VME interrupts with
-specific VME level and status IDs.
-
-
-Attaching Interrupt Handlers
-----------------------------
-
-The following functions can be used to attach a callback to, and free, a
-specific VME level and status ID combination. Any given combination can only be
-assigned a single callback function. A void pointer parameter is provided; its
-value is passed to the callback function and its use is left entirely to the
-user:
-
- int vme_irq_request(struct vme_dev *dev, int level, int statid,
- void (*callback)(int, int, void *), void *priv);
-
- void vme_irq_free(struct vme_dev *dev, int level, int statid);
-
-The callback parameters are as follows. Care must be taken in writing a
-callback function, as callback functions run in interrupt context:
-
- void callback(int level, int statid, void *priv);
-
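-For example, a driver might attach a handler for level 3, status/ID 0x20, and
-defer any real work out of interrupt context; the 'struct example_device' and
-its work item used below are hypothetical:
-
-        static void example_irq(int level, int statid, void *priv)
-        {
-                struct example_device *edev = priv;
-
-                /* Interrupt context: defer the real work */
-                schedule_work(&edev->irq_work);
-        }
-
-        ...
-
-        retval = vme_irq_request(vdev, 3, 0x20, example_irq, edev);
-        if (retval)
-                return retval;
-
-        /* On removal */
-        vme_irq_free(vdev, 3, 0x20);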
-
-Interrupt Generation
---------------------
-
-The following function can be used to generate a VME interrupt at a given VME
-level and VME status ID:
-
- int vme_irq_generate(struct vme_dev *dev, int level, int statid);
-
-
-Location monitors
-=================
-
-The VME API provides the following functionality to configure the location
-monitor.
-
-
-Location Monitor Management
----------------------------
-
-The following functions are provided to request the use of a block of location
-monitors and to free them after they are no longer required:
-
- struct vme_resource * vme_lm_request(struct vme_dev *dev);
-
- void vme_lm_free(struct vme_resource * res);
-
-Each block may provide a number of location monitors, monitoring adjacent
-locations. The following function can be used to determine how many locations
-are provided:
-
- int vme_lm_count(struct vme_resource * res);
-
-
-Location Monitor Configuration
-------------------------------
-
-Once a bank of location monitors has been allocated, the following functions
-are provided to configure the location and mode of the location monitor:
-
- int vme_lm_set(struct vme_resource *res, unsigned long long base,
- u32 aspace, u32 cycle);
-
- int vme_lm_get(struct vme_resource *res, unsigned long long *base,
- u32 *aspace, u32 *cycle);
-
-
-Location Monitor Use
---------------------
-
-The following functions allow a callback to be attached to and detached from
-each location monitor location. Each location monitor can monitor a number of
-adjacent locations:
-
- int vme_lm_attach(struct vme_resource *res, int num,
- void (*callback)(int));
-
- int vme_lm_detach(struct vme_resource *res, int num);
-
-The callback function is declared as follows:
-
- void callback(int num);
-
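-As a brief sketch, a driver could claim a block of location monitors, place it
-in A16 space and attach a callback to the first location (values illustrative):
-
-        static void example_lm_callback(int num)
-        {
-                /* Location monitor 'num' was accessed */
-        }
-
-        ...
-
-        struct vme_resource *lm;
-
-        lm = vme_lm_request(vdev);
-        if (lm == NULL)
-                return -EBUSY;
-
-        retval = vme_lm_set(lm, 0x8000, VME_A16, VME_SCT | VME_DATA);
-        if (retval == 0)
-                retval = vme_lm_attach(lm, 0, example_lm_callback);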
-
-Slot Detection
-==============
-
-This function returns the slot ID of the provided bridge.
-
- int vme_slot_get(struct vme_dev *dev);
+++ /dev/null
-#ifndef _VME_BRIDGE_H_
-#define _VME_BRIDGE_H_
-
-#define VME_CRCSR_BUF_SIZE (508*1024)
-/*
- * Resource structures
- */
-struct vme_master_resource {
- struct list_head list;
- struct vme_bridge *parent;
- /*
- * We are likely to need to access the VME bus in interrupt context, so
- * protect master routines with a spinlock rather than a mutex.
- */
- spinlock_t lock;
- int locked;
- int number;
- u32 address_attr;
- u32 cycle_attr;
- u32 width_attr;
- struct resource bus_resource;
- void __iomem *kern_base;
-};
-
-struct vme_slave_resource {
- struct list_head list;
- struct vme_bridge *parent;
- struct mutex mtx;
- int locked;
- int number;
- u32 address_attr;
- u32 cycle_attr;
-};
-
-struct vme_dma_pattern {
- u32 pattern;
- u32 type;
-};
-
-struct vme_dma_pci {
- dma_addr_t address;
-};
-
-struct vme_dma_vme {
- unsigned long long address;
- u32 aspace;
- u32 cycle;
- u32 dwidth;
-};
-
-struct vme_dma_list {
- struct list_head list;
- struct vme_dma_resource *parent;
- struct list_head entries;
- struct mutex mtx;
-};
-
-struct vme_dma_resource {
- struct list_head list;
- struct vme_bridge *parent;
- struct mutex mtx;
- int locked;
- int number;
- struct list_head pending;
- struct list_head running;
- u32 route_attr;
-};
-
-struct vme_lm_resource {
- struct list_head list;
- struct vme_bridge *parent;
- struct mutex mtx;
- int locked;
- int number;
- int monitors;
-};
-
-struct vme_bus_error {
- struct list_head list;
- unsigned long long address;
- u32 attributes;
-};
-
-struct vme_callback {
- void (*func)(int, int, void*);
- void *priv_data;
-};
-
-struct vme_irq {
- int count;
- struct vme_callback callback[255];
-};
-
-/* Allow 16 characters for name (including null character) */
-#define VMENAMSIZ 16
-
-/* This structure stores all the information about one bridge
- * The structure should be dynamically allocated by the driver and one instance
- * of the structure should be present for each VME chip present in the system.
- */
-struct vme_bridge {
- char name[VMENAMSIZ];
- int num;
- struct list_head master_resources;
- struct list_head slave_resources;
- struct list_head dma_resources;
- struct list_head lm_resources;
-
- struct list_head vme_errors; /* List for errors generated on VME */
- struct list_head devices; /* List of devices on this bridge */
-
- /* Bridge Info - XXX Move to private structure? */
- struct device *parent; /* Parent device (eg. pdev->dev for PCI) */
- void *driver_priv; /* Private pointer for the bridge driver */
- struct list_head bus_list; /* list of VME buses */
-
- /* Interrupt callbacks */
- struct vme_irq irq[7];
- /* Locking for VME irq callback configuration */
- struct mutex irq_mtx;
-
- /* Slave Functions */
- int (*slave_get) (struct vme_slave_resource *, int *,
- unsigned long long *, unsigned long long *, dma_addr_t *,
- u32 *, u32 *);
- int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, u32, u32);
-
- /* Master Functions */
- int (*master_get) (struct vme_master_resource *, int *,
- unsigned long long *, unsigned long long *, u32 *, u32 *,
- u32 *);
- int (*master_set) (struct vme_master_resource *, int,
- unsigned long long, unsigned long long, u32, u32, u32);
- ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
- loff_t);
- ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
- loff_t);
- unsigned int (*master_rmw) (struct vme_master_resource *, unsigned int,
- unsigned int, unsigned int, loff_t);
-
- /* DMA Functions */
- int (*dma_list_add) (struct vme_dma_list *, struct vme_dma_attr *,
- struct vme_dma_attr *, size_t);
- int (*dma_list_exec) (struct vme_dma_list *);
- int (*dma_list_empty) (struct vme_dma_list *);
-
- /* Interrupt Functions */
- void (*irq_set) (struct vme_bridge *, int, int, int);
- int (*irq_generate) (struct vme_bridge *, int, int);
-
- /* Location monitor functions */
- int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
- int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
- u32 *);
- int (*lm_attach) (struct vme_lm_resource *, int, void (*callback)(int));
- int (*lm_detach) (struct vme_lm_resource *, int);
-
- /* CR/CSR space functions */
- int (*slot_get) (struct vme_bridge *);
-
- /* Bridge parent interface */
- void *(*alloc_consistent)(struct device *dev, size_t size,
- dma_addr_t *dma);
- void (*free_consistent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma);
-};
-
-void vme_irq_handler(struct vme_bridge *, int, int);
-
-int vme_register_bridge(struct vme_bridge *);
-void vme_unregister_bridge(struct vme_bridge *);
-
-#endif /* _VME_BRIDGE_H_ */
--- /dev/null
+#
+# VME configuration.
+#
+
+menuconfig VME_BUS
+ tristate "VME bridge support"
+ depends on PCI
+ ---help---
+ If you say Y here you get support for the VME bridge Framework.
+
+if VME_BUS
+
+source "drivers/vme/bridges/Kconfig"
+
+source "drivers/vme/boards/Kconfig"
+
+source "drivers/staging/vme/devices/Kconfig"
+
+endif # VME
--- /dev/null
+#
+# Makefile for the VME bridge device drivers.
+#
+obj-$(CONFIG_VME_BUS) += vme.o
+
+obj-y += bridges/
+obj-y += boards/
--- /dev/null
+comment "VME Board Drivers"
+
+config VMIVME_7805
+ tristate "VMIVME-7805"
+ help
+ If you say Y here you get support for the VMIVME-7805 board.
+ This board has an additional control interface to the Universe II
+ chip. This driver has to be included if you want to access VME bus
+ with VMIVME-7805 board.
--- /dev/null
+#
+# Makefile for the VME board drivers.
+#
+
+obj-$(CONFIG_VMIVME_7805) += vme_vmivme7805.o
--- /dev/null
+/*
+ * Support for the VMIVME-7805 board access to the Universe II bridge.
+ *
+ * Author: Arthur Benilov <arthur.benilov@iba-group.com>
+ * Copyright 2010 Ion Beam Application, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+
+#include "vme_vmivme7805.h"
+
+static int __init vmic_init(void);
+static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
+static void vmic_remove(struct pci_dev *);
+static void __exit vmic_exit(void);
+
+/** Base address to access FPGA register */
+static void *vmic_base;
+
+static const char driver_name[] = "vmivme_7805";
+
+static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
+ { },
+};
+
+static struct pci_driver vmic_driver = {
+ .name = driver_name,
+ .id_table = vmic_ids,
+ .probe = vmic_probe,
+ .remove = vmic_remove,
+};
+
+static int __init vmic_init(void)
+{
+ return pci_register_driver(&vmic_driver);
+}
+
+static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int retval;
+ u32 data;
+
+ /* Enable the device */
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "Unable to enable device\n");
+ goto err;
+ }
+
+ /* Map Registers */
+ retval = pci_request_regions(pdev, driver_name);
+ if (retval) {
+ dev_err(&pdev->dev, "Unable to reserve resources\n");
+ goto err_resource;
+ }
+
+ /* Map registers in BAR 0 */
+ vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
+ if (!vmic_base) {
+ dev_err(&pdev->dev, "Unable to remap CRG region\n");
+ retval = -EIO;
+ goto err_remap;
+ }
+
+ /* Clear the FPGA VME IF contents */
+ iowrite32(0, vmic_base + VME_CONTROL);
+
+ /* Clear any initial BERR */
+ data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
+ data |= BM_VME_CONTROL_BERRST;
+ iowrite32(data, vmic_base + VME_CONTROL);
+
+ /* Enable the vme interface and byte swapping */
+ data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
+ data = data | BM_VME_CONTROL_MASTER_ENDIAN |
+ BM_VME_CONTROL_SLAVE_ENDIAN |
+ BM_VME_CONTROL_ABLE |
+ BM_VME_CONTROL_BERRI |
+ BM_VME_CONTROL_BPENA |
+ BM_VME_CONTROL_VBENA;
+ iowrite32(data, vmic_base + VME_CONTROL);
+
+ return 0;
+
+err_remap:
+ pci_release_regions(pdev);
+err_resource:
+ pci_disable_device(pdev);
+err:
+ return retval;
+}
+
+static void vmic_remove(struct pci_dev *pdev)
+{
+ iounmap(vmic_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+}
+
+static void __exit vmic_exit(void)
+{
+ pci_unregister_driver(&vmic_driver);
+}
+
+MODULE_DESCRIPTION("VMIVME-7805 board support driver");
+MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
+MODULE_LICENSE("GPL");
+
+module_init(vmic_init);
+module_exit(vmic_exit);
+
--- /dev/null
+/*
+ * vmivme_7805.h
+ *
+ * Support for the VMIVME-7805 board access to the Universe II bridge.
+ *
+ * Author: Arthur Benilov <arthur.benilov@iba-group.com>
+ * Copyright 2010 Ion Beam Application, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+#ifndef _VMIVME_7805_H
+#define _VMIVME_7805_H
+
+#ifndef PCI_VENDOR_ID_VMIC
+#define PCI_VENDOR_ID_VMIC 0x114A
+#endif
+
+#ifndef PCI_DEVICE_ID_VTIMR
+#define PCI_DEVICE_ID_VTIMR 0x0004
+#endif
+
+#define VME_CONTROL 0x0000
+#define BM_VME_CONTROL_MASTER_ENDIAN 0x0001
+#define BM_VME_CONTROL_SLAVE_ENDIAN 0x0002
+#define BM_VME_CONTROL_ABLE 0x0004
+#define BM_VME_CONTROL_BERRI 0x0040
+#define BM_VME_CONTROL_BERRST 0x0080
+#define BM_VME_CONTROL_BPENA 0x0400
+#define BM_VME_CONTROL_VBENA 0x0800
+
+#endif /* _VMIVME_7805_H */
+
--- /dev/null
+comment "VME Bridge Drivers"
+
+config VME_CA91CX42
+ tristate "Universe II"
+ depends on VIRT_TO_BUS
+ help
+ If you say Y here you get support for the Tundra CA91C142
+ (Universe II) VME bridge chip.
+
+config VME_TSI148
+ tristate "Tempe"
+ depends on VIRT_TO_BUS
+ help
+ If you say Y here you get support for the Tundra TSI148 VME bridge
+ chip.
--- /dev/null
+obj-$(CONFIG_VME_CA91CX42) += vme_ca91cx42.o
+obj-$(CONFIG_VME_TSI148) += vme_tsi148.o
--- /dev/null
+/*
+ * Support for the Tundra Universe I/II VME-PCI Bridge Chips
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * Based on work by Tom Armistead and Ajit Prem
+ * Copyright 2004 Motorola Inc.
+ *
+ * Derived from ca91c042.c by Michael Wyrick
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/vme.h>
+
+#include "../vme_bridge.h"
+#include "vme_ca91cx42.h"
+
+static int __init ca91cx42_init(void);
+static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
+static void ca91cx42_remove(struct pci_dev *);
+static void __exit ca91cx42_exit(void);
+
+/* Module parameters */
+static int geoid;
+
+static const char driver_name[] = "vme_ca91cx42";
+
+static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
+ { },
+};
+
+static struct pci_driver ca91cx42_driver = {
+ .name = driver_name,
+ .id_table = ca91cx42_ids,
+ .probe = ca91cx42_probe,
+ .remove = ca91cx42_remove,
+};
+
+static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
+{
+ wake_up(&bridge->dma_queue);
+
+ return CA91CX42_LINT_DMA;
+}
+
+static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
+{
+ int i;
+ u32 serviced = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (stat & CA91CX42_LINT_LM[i]) {
+ /* We only enable interrupts if the callback is set */
+ bridge->lm_callback[i](i);
+ serviced |= CA91CX42_LINT_LM[i];
+ }
+ }
+
+ return serviced;
+}
+
+/* XXX This needs to be split into 4 queues */
+static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
+{
+ wake_up(&bridge->mbox_queue);
+
+ return CA91CX42_LINT_MBOX;
+}
+
+static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
+{
+ wake_up(&bridge->iack_queue);
+
+ return CA91CX42_LINT_SW_IACK;
+}
+
+static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
+{
+ int val;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ val = ioread32(bridge->base + DGCS);
+
+ if (!(val & 0x00000800)) {
+ dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
+ "Read Error DGCS=%08X\n", val);
+ }
+
+ return CA91CX42_LINT_VERR;
+}
+
+static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
+{
+ int val;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ val = ioread32(bridge->base + DGCS);
+
+ if (!(val & 0x00000800))
+ dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
+ "Read Error DGCS=%08X\n", val);
+
+ return CA91CX42_LINT_LERR;
+}
+
+
+static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
+ int stat)
+{
+ int vec, i, serviced = 0;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+
+ for (i = 7; i > 0; i--) {
+ if (stat & (1 << i)) {
+ vec = ioread32(bridge->base +
+ CA91CX42_V_STATID[i]) & 0xff;
+
+ vme_irq_handler(ca91cx42_bridge, i, vec);
+
+ serviced |= (1 << i);
+ }
+ }
+
+ return serviced;
+}
+
+static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
+{
+ u32 stat, enable, serviced = 0;
+ struct vme_bridge *ca91cx42_bridge;
+ struct ca91cx42_driver *bridge;
+
+ ca91cx42_bridge = ptr;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ enable = ioread32(bridge->base + LINT_EN);
+ stat = ioread32(bridge->base + LINT_STAT);
+
+ /* Only look at unmasked interrupts */
+ stat &= enable;
+
+ if (unlikely(!stat))
+ return IRQ_NONE;
+
+ if (stat & CA91CX42_LINT_DMA)
+ serviced |= ca91cx42_DMA_irqhandler(bridge);
+ if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
+ CA91CX42_LINT_LM3))
+ serviced |= ca91cx42_LM_irqhandler(bridge, stat);
+ if (stat & CA91CX42_LINT_MBOX)
+ serviced |= ca91cx42_MB_irqhandler(bridge, stat);
+ if (stat & CA91CX42_LINT_SW_IACK)
+ serviced |= ca91cx42_IACK_irqhandler(bridge);
+ if (stat & CA91CX42_LINT_VERR)
+ serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
+ if (stat & CA91CX42_LINT_LERR)
+ serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
+ if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
+ CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
+ CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
+ CA91CX42_LINT_VIRQ7))
+ serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
+
+ /* Clear serviced interrupts */
+ iowrite32(serviced, bridge->base + LINT_STAT);
+
+ return IRQ_HANDLED;
+}
+
+static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
+{
+ int result, tmp;
+ struct pci_dev *pdev;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ /* Need pdev */
+ pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
+
+ /* Initialise list for VME bus errors */
+ INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);
+
+ mutex_init(&ca91cx42_bridge->irq_mtx);
+
+ /* Disable interrupts from PCI to VME */
+ iowrite32(0, bridge->base + VINT_EN);
+
+ /* Disable PCI interrupts */
+ iowrite32(0, bridge->base + LINT_EN);
+ /* Clear Any Pending PCI Interrupts */
+ iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
+
+ result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
+ driver_name, ca91cx42_bridge);
+ if (result) {
+ dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
+ pdev->irq);
+ return result;
+ }
+
+ /* Ensure all interrupts are mapped to PCI Interrupt 0 */
+ iowrite32(0, bridge->base + LINT_MAP0);
+ iowrite32(0, bridge->base + LINT_MAP1);
+ iowrite32(0, bridge->base + LINT_MAP2);
+
+ /* Enable DMA, mailbox & LM Interrupts */
+ tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
+ CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
+ CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;
+
+ iowrite32(tmp, bridge->base + LINT_EN);
+
+ return 0;
+}
+
+static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
+ struct pci_dev *pdev)
+{
+ /* Disable interrupts from PCI to VME */
+ iowrite32(0, bridge->base + VINT_EN);
+
+ /* Disable PCI interrupts */
+ iowrite32(0, bridge->base + LINT_EN);
+ /* Clear Any Pending PCI Interrupts */
+ iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
+
+ free_irq(pdev->irq, pdev);
+}
+
+static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
+{
+ u32 tmp;
+
+ tmp = ioread32(bridge->base + LINT_STAT);
+
+ if (tmp & (1 << level))
+ return 0;
+ else
+ return 1;
+}
+
+/*
+ * Set up a VME interrupt
+ */
+static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
+ int state, int sync)
+
+{
+ struct pci_dev *pdev;
+ u32 tmp;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ /* Enable IRQ level */
+ tmp = ioread32(bridge->base + LINT_EN);
+
+ if (state == 0)
+ tmp &= ~CA91CX42_LINT_VIRQ[level];
+ else
+ tmp |= CA91CX42_LINT_VIRQ[level];
+
+ iowrite32(tmp, bridge->base + LINT_EN);
+
+ if ((state == 0) && (sync != 0)) {
+ pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
+ dev);
+
+ synchronize_irq(pdev->irq);
+ }
+}
+
+static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
+ int statid)
+{
+ u32 tmp;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ /* Universe can only generate even vectors */
+ if (statid & 1)
+ return -EINVAL;
+
+ mutex_lock(&bridge->vme_int);
+
+ tmp = ioread32(bridge->base + VINT_EN);
+
+ /* Set Status/ID */
+ iowrite32(statid << 24, bridge->base + STATID);
+
+ /* Assert VMEbus IRQ */
+ tmp = tmp | (1 << (level + 24));
+ iowrite32(tmp, bridge->base + VINT_EN);
+
+ /* Wait for IACK */
+ wait_event_interruptible(bridge->iack_queue,
+ ca91cx42_iack_received(bridge, level));
+
+ /* Return interrupt to low state */
+ tmp = ioread32(bridge->base + VINT_EN);
+ tmp = tmp & ~(1 << (level + 24));
+ iowrite32(tmp, bridge->base + VINT_EN);
+
+ mutex_unlock(&bridge->vme_int);
+
+ return 0;
+}
+
+static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
+ unsigned long long vme_base, unsigned long long size,
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
+{
+ unsigned int i, addr = 0, granularity;
+ unsigned int temp_ctl = 0;
+ unsigned int vme_bound, pci_offset;
+ struct vme_bridge *ca91cx42_bridge;
+ struct ca91cx42_driver *bridge;
+
+ ca91cx42_bridge = image->parent;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ i = image->number;
+
+ switch (aspace) {
+ case VME_A16:
+ addr |= CA91CX42_VSI_CTL_VAS_A16;
+ break;
+ case VME_A24:
+ addr |= CA91CX42_VSI_CTL_VAS_A24;
+ break;
+ case VME_A32:
+ addr |= CA91CX42_VSI_CTL_VAS_A32;
+ break;
+ case VME_USER1:
+ addr |= CA91CX42_VSI_CTL_VAS_USER1;
+ break;
+ case VME_USER2:
+ addr |= CA91CX42_VSI_CTL_VAS_USER2;
+ break;
+ case VME_A64:
+ case VME_CRCSR:
+ case VME_USER3:
+ case VME_USER4:
+ default:
+ dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
+ return -EINVAL;
+ break;
+ }
+
+ /*
+ * Bound address is a valid address for the window, adjust
+ * accordingly
+ */
+ vme_bound = vme_base + size;
+ pci_offset = pci_base - vme_base;
+
+ if ((i == 0) || (i == 4))
+ granularity = 0x1000;
+ else
+ granularity = 0x10000;
+
+ if (vme_base & (granularity - 1)) {
+ dev_err(ca91cx42_bridge->parent, "Invalid VME base "
+ "alignment\n");
+ return -EINVAL;
+ }
+ if (vme_bound & (granularity - 1)) {
+ dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
+ "alignment\n");
+ return -EINVAL;
+ }
+ if (pci_offset & (granularity - 1)) {
+ dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
+ "alignment\n");
+ return -EINVAL;
+ }
+
+ /* Disable while we are mucking around */
+ temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
+ temp_ctl &= ~CA91CX42_VSI_CTL_EN;
+ iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
+
+ /* Setup mapping */
+ iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
+ iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
+ iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
+
+ /* Setup address space */
+ temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
+ temp_ctl |= addr;
+
+ /* Setup cycle types */
+ temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
+ if (cycle & VME_SUPER)
+ temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
+ if (cycle & VME_USER)
+ temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
+ if (cycle & VME_PROG)
+ temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
+ if (cycle & VME_DATA)
+ temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
+
+ /* Write ctl reg without enable */
+ iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
+
+ if (enabled)
+ temp_ctl |= CA91CX42_VSI_CTL_EN;
+
+ iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
+
+ return 0;
+}
+
+static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size,
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
+{
+ unsigned int i, granularity = 0, ctl = 0;
+ unsigned long long vme_bound, pci_offset;
+ struct ca91cx42_driver *bridge;
+
+ bridge = image->parent->driver_priv;
+
+ i = image->number;
+
+ if ((i == 0) || (i == 4))
+ granularity = 0x1000;
+ else
+ granularity = 0x10000;
+
+ /* Read Registers */
+ ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
+
+ *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
+ vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
+ pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
+
+ *pci_base = (dma_addr_t)*vme_base + pci_offset;
+ *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
+
+ *enabled = 0;
+ *aspace = 0;
+ *cycle = 0;
+
+ if (ctl & CA91CX42_VSI_CTL_EN)
+ *enabled = 1;
+
+ if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
+ *aspace = VME_A16;
+ if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
+ *aspace = VME_A24;
+ if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
+ *aspace = VME_A32;
+ if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
+ *aspace = VME_USER1;
+ if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
+ *aspace = VME_USER2;
+
+ if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
+ *cycle |= VME_SUPER;
+ if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
+ *cycle |= VME_USER;
+ if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
+ *cycle |= VME_PROG;
+ if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
+ *cycle |= VME_DATA;
+
+ return 0;
+}
+
+/*
+ * Allocate and map PCI Resource
+ */
+static int ca91cx42_alloc_resource(struct vme_master_resource *image,
+ unsigned long long size)
+{
+ unsigned long long existing_size;
+ int retval = 0;
+ struct pci_dev *pdev;
+ struct vme_bridge *ca91cx42_bridge;
+
+ ca91cx42_bridge = image->parent;
+
+ /* Find pci_dev container of dev */
+ if (ca91cx42_bridge->parent == NULL) {
+ dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
+ return -EINVAL;
+ }
+ pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
+
+ existing_size = (unsigned long long)(image->bus_resource.end -
+ image->bus_resource.start);
+
+ /* If the existing size is OK, return */
+ if (existing_size == (size - 1))
+ return 0;
+
+ if (existing_size != 0) {
+ iounmap(image->kern_base);
+ image->kern_base = NULL;
+ kfree(image->bus_resource.name);
+ release_resource(&image->bus_resource);
+ memset(&image->bus_resource, 0, sizeof(struct resource));
+ }
+
+ if (image->bus_resource.name == NULL) {
+ image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
+ if (image->bus_resource.name == NULL) {
+ dev_err(ca91cx42_bridge->parent, "Unable to allocate "
+ "memory for resource name\n");
+ retval = -ENOMEM;
+ goto err_name;
+ }
+ }
+
+ sprintf((char *)image->bus_resource.name, "%s.%d",
+ ca91cx42_bridge->name, image->number);
+
+ image->bus_resource.start = 0;
+ image->bus_resource.end = (unsigned long)size;
+ image->bus_resource.flags = IORESOURCE_MEM;
+
+ retval = pci_bus_alloc_resource(pdev->bus,
+ &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
+ 0, NULL, NULL);
+ if (retval) {
+ dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
+ "resource for window %d size 0x%lx start 0x%lx\n",
+ image->number, (unsigned long)size,
+ (unsigned long)image->bus_resource.start);
+ goto err_resource;
+ }
+
+ image->kern_base = ioremap_nocache(
+ image->bus_resource.start, size);
+ if (image->kern_base == NULL) {
+ dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
+ retval = -ENOMEM;
+ goto err_remap;
+ }
+
+ return 0;
+
+err_remap:
+ release_resource(&image->bus_resource);
+err_resource:
+ kfree(image->bus_resource.name);
+ memset(&image->bus_resource, 0, sizeof(struct resource));
+err_name:
+ return retval;
+}
+
+/*
+ * Free and unmap PCI Resource
+ */
+static void ca91cx42_free_resource(struct vme_master_resource *image)
+{
+ iounmap(image->kern_base);
+ image->kern_base = NULL;
+ release_resource(&image->bus_resource);
+ kfree(image->bus_resource.name);
+ memset(&image->bus_resource, 0, sizeof(struct resource));
+}
+
+
+static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
+{
+ int retval = 0;
+ unsigned int i, granularity = 0;
+ unsigned int temp_ctl = 0;
+ unsigned long long pci_bound, vme_offset, pci_base;
+ struct vme_bridge *ca91cx42_bridge;
+ struct ca91cx42_driver *bridge;
+
+ ca91cx42_bridge = image->parent;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ i = image->number;
+
+ if ((i == 0) || (i == 4))
+ granularity = 0x1000;
+ else
+ granularity = 0x10000;
+
+ /* Verify input data */
+ if (vme_base & (granularity - 1)) {
+ dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
+ "alignment\n");
+ retval = -EINVAL;
+ goto err_window;
+ }
+ if (size & (granularity - 1)) {
+ dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
+ "alignment\n");
+ retval = -EINVAL;
+ goto err_window;
+ }
+
+ spin_lock(&image->lock);
+
+ /*
+ * Let's allocate the resource here rather than further up the stack as
+ * it avoids pushing loads of bus dependent stuff up the stack
+ */
+ retval = ca91cx42_alloc_resource(image, size);
+ if (retval) {
+ spin_unlock(&image->lock);
+ dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
+ "for resource name\n");
+ retval = -ENOMEM;
+ goto err_res;
+ }
+
+ pci_base = (unsigned long long)image->bus_resource.start;
+
+ /*
+ * Bound address is a valid address for the window, adjust
+ * according to window granularity.
+ */
+ pci_bound = pci_base + size;
+ vme_offset = vme_base - pci_base;
+
+ /* Disable while we are mucking around */
+ temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
+ temp_ctl &= ~CA91CX42_LSI_CTL_EN;
+ iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
+
+ /* Setup cycle types */
+ temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
+ if (cycle & VME_BLT)
+ temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
+ if (cycle & VME_MBLT)
+ temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
+
+ /* Setup data width */
+ temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
+ switch (dwidth) {
+ case VME_D8:
+ temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
+ break;
+ case VME_D16:
+ temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
+ break;
+ case VME_D32:
+ temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
+ break;
+ case VME_D64:
+ temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
+ break;
+ default:
+ spin_unlock(&image->lock);
+ dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
+ retval = -EINVAL;
+ goto err_dwidth;
+ break;
+ }
+
+ /* Setup address space */
+ temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
+ switch (aspace) {
+ case VME_A16:
+ temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
+ break;
+ case VME_A24:
+ temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
+ break;
+ case VME_A32:
+ temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
+ break;
+ case VME_CRCSR:
+ temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
+ break;
+ case VME_USER1:
+ temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
+ break;
+ case VME_USER2:
+ temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
+ break;
+ case VME_A64:
+ case VME_USER3:
+ case VME_USER4:
+ default:
+ spin_unlock(&image->lock);
+ dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
+ retval = -EINVAL;
+ goto err_aspace;
+ }
+
+ temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
+ if (cycle & VME_SUPER)
+ temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
+ if (cycle & VME_PROG)
+ temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
+
+ /* Setup mapping */
+ iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
+ iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
+ iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
+
+ /* Write ctl reg without enable */
+ iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
+
+ if (enabled)
+ temp_ctl |= CA91CX42_LSI_CTL_EN;
+
+ iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
+
+ spin_unlock(&image->lock);
+ return 0;
+
+err_aspace:
+err_dwidth:
+ ca91cx42_free_resource(image);
+err_res:
+err_window:
+ return retval;
+}
+
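+/*
+ * Read back the current settings of an outbound window from its LSI
+ * registers. Callers are expected to hold the image lock (see
+ * ca91cx42_master_get()).
+ */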
+static int __ca91cx42_master_get(struct vme_master_resource *image,
+ int *enabled, unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
+{
+ unsigned int i, ctl;
+ unsigned long long pci_base, pci_bound, vme_offset;
+ struct ca91cx42_driver *bridge;
+
+ bridge = image->parent->driver_priv;
+
+ i = image->number;
+
+ ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
+
+ pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
+ vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
+ pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
+
+ *vme_base = pci_base + vme_offset;
+ *size = (unsigned long long)(pci_bound - pci_base);
+
+ *enabled = 0;
+ *aspace = 0;
+ *cycle = 0;
+ *dwidth = 0;
+
+ if (ctl & CA91CX42_LSI_CTL_EN)
+ *enabled = 1;
+
+ /* Setup address space */
+ switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
+ case CA91CX42_LSI_CTL_VAS_A16:
+ *aspace = VME_A16;
+ break;
+ case CA91CX42_LSI_CTL_VAS_A24:
+ *aspace = VME_A24;
+ break;
+ case CA91CX42_LSI_CTL_VAS_A32:
+ *aspace = VME_A32;
+ break;
+ case CA91CX42_LSI_CTL_VAS_CRCSR:
+ *aspace = VME_CRCSR;
+ break;
+ case CA91CX42_LSI_CTL_VAS_USER1:
+ *aspace = VME_USER1;
+ break;
+ case CA91CX42_LSI_CTL_VAS_USER2:
+ *aspace = VME_USER2;
+ break;
+ }
+
+	/* XXX Not sure how to check for MBLT */
+ /* Setup cycle types */
+ if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
+ *cycle |= VME_BLT;
+ else
+ *cycle |= VME_SCT;
+
+ if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
+ *cycle |= VME_SUPER;
+ else
+ *cycle |= VME_USER;
+
+	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
+		*cycle |= VME_PROG;
+	else
+		*cycle |= VME_DATA;
+
+ /* Setup data width */
+ switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
+ case CA91CX42_LSI_CTL_VDW_D8:
+ *dwidth = VME_D8;
+ break;
+ case CA91CX42_LSI_CTL_VDW_D16:
+ *dwidth = VME_D16;
+ break;
+ case CA91CX42_LSI_CTL_VDW_D32:
+ *dwidth = VME_D32;
+ break;
+ case CA91CX42_LSI_CTL_VDW_D64:
+ *dwidth = VME_D64;
+ break;
+ }
+
+ return 0;
+}
+
+static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
+{
+ int retval;
+
+ spin_lock(&image->lock);
+
+ retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
+ cycle, dwidth);
+
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
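+/*
+ * Read from a master window into a kernel buffer, using byte and word
+ * accesses at unaligned edges so the configured data width is preserved.
+ */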
+static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
+ void *buf, size_t count, loff_t offset)
+{
+ ssize_t retval;
+ void *addr = image->kern_base + offset;
+ unsigned int done = 0;
+ unsigned int count32;
+
+ if (count == 0)
+ return 0;
+
+ spin_lock(&image->lock);
+
+	/* The following code handles the VME address alignment problem
+	 * in order to assure the maximal data width cycle.
+	 * We cannot use memcpy_xxx directly here because it may cut
+	 * the data transfer into 8-bit cycles, making a D16 cycle
+	 * impossible.
+	 * On the other hand, the bridge itself assures that the maximal
+	 * configured data cycle is used and splits it automatically for
+	 * non-aligned addresses.
+	 */
+ if ((uintptr_t)addr & 0x1) {
+ *(u8 *)buf = ioread8(addr);
+ done += 1;
+ if (done == count)
+ goto out;
+ }
+	if ((uintptr_t)(addr + done) & 0x2) {
+ if ((count - done) < 2) {
+ *(u8 *)(buf + done) = ioread8(addr + done);
+ done += 1;
+ goto out;
+ } else {
+ *(u16 *)(buf + done) = ioread16(addr + done);
+ done += 2;
+ }
+ }
+
+ count32 = (count - done) & ~0x3;
+ if (count32 > 0) {
+		memcpy_fromio(buf + done, addr + done, count32);
+ done += count32;
+ }
+
+ if ((count - done) & 0x2) {
+ *(u16 *)(buf + done) = ioread16(addr + done);
+ done += 2;
+ }
+ if ((count - done) & 0x1) {
+ *(u8 *)(buf + done) = ioread8(addr + done);
+ done += 1;
+ }
+out:
+ retval = count;
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
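+/*
+ * Write a kernel buffer out through a master window, mirroring the
+ * alignment handling used in ca91cx42_master_read().
+ */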
+static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
+ void *buf, size_t count, loff_t offset)
+{
+ ssize_t retval;
+ void *addr = image->kern_base + offset;
+ unsigned int done = 0;
+ unsigned int count32;
+
+ if (count == 0)
+ return 0;
+
+ spin_lock(&image->lock);
+
+	/* Here we apply the same strategy as in master_read
+	 * in order to assure a D16 cycle when required.
+	 */
+ if ((uintptr_t)addr & 0x1) {
+ iowrite8(*(u8 *)buf, addr);
+ done += 1;
+ if (done == count)
+ goto out;
+ }
+	if ((uintptr_t)(addr + done) & 0x2) {
+ if ((count - done) < 2) {
+ iowrite8(*(u8 *)(buf + done), addr + done);
+ done += 1;
+ goto out;
+ } else {
+ iowrite16(*(u16 *)(buf + done), addr + done);
+ done += 2;
+ }
+ }
+
+ count32 = (count - done) & ~0x3;
+ if (count32 > 0) {
+ memcpy_toio(addr + done, buf + done, count32);
+ done += count32;
+ }
+
+ if ((count - done) & 0x2) {
+ iowrite16(*(u16 *)(buf + done), addr + done);
+ done += 2;
+ }
+ if ((count - done) & 0x1) {
+ iowrite8(*(u8 *)(buf + done), addr + done);
+ done += 1;
+ }
+out:
+ retval = count;
+
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
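+/*
+ * Perform a read-modify-write cycle on the VME bus using the special cycle
+ * generator (SCYC registers). The cycle is kicked off by a read of the
+ * target address once the generator has been configured.
+ */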
+static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
+ unsigned int mask, unsigned int compare, unsigned int swap,
+ loff_t offset)
+{
+ u32 result;
+ uintptr_t pci_addr;
+ int i;
+ struct ca91cx42_driver *bridge;
+ struct device *dev;
+
+ bridge = image->parent->driver_priv;
+ dev = image->parent->parent;
+
+ /* Find the PCI address that maps to the desired VME address */
+ i = image->number;
+
+ /* Locking as we can only do one of these at a time */
+ mutex_lock(&bridge->vme_rmw);
+
+ /* Lock image */
+ spin_lock(&image->lock);
+
+ pci_addr = (uintptr_t)image->kern_base + offset;
+
+ /* Address must be 4-byte aligned */
+ if (pci_addr & 0x3) {
+ dev_err(dev, "RMW Address not 4-byte aligned\n");
+ result = -EINVAL;
+ goto out;
+ }
+
+ /* Ensure RMW Disabled whilst configuring */
+ iowrite32(0, bridge->base + SCYC_CTL);
+
+ /* Configure registers */
+ iowrite32(mask, bridge->base + SCYC_EN);
+ iowrite32(compare, bridge->base + SCYC_CMP);
+ iowrite32(swap, bridge->base + SCYC_SWP);
+ iowrite32(pci_addr, bridge->base + SCYC_ADDR);
+
+ /* Enable RMW */
+ iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);
+
+ /* Kick process off with a read to the required address. */
+ result = ioread32(image->kern_base + offset);
+
+ /* Disable RMW */
+ iowrite32(0, bridge->base + SCYC_CTL);
+
+out:
+ spin_unlock(&image->lock);
+
+ mutex_unlock(&bridge->vme_rmw);
+
+ return result;
+}
+
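+/*
+ * Build a hardware descriptor for the requested transfer and append it to
+ * the DMA list, linking it into the previous descriptor's command packet
+ * pointer.
+ */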
+static int ca91cx42_dma_list_add(struct vme_dma_list *list,
+ struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
+{
+ struct ca91cx42_dma_entry *entry, *prev;
+ struct vme_dma_pci *pci_attr;
+ struct vme_dma_vme *vme_attr;
+ dma_addr_t desc_ptr;
+ int retval = 0;
+ struct device *dev;
+
+ dev = list->parent->parent->parent;
+
+ /* XXX descriptor must be aligned on 64-bit boundaries */
+ entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
+ if (entry == NULL) {
+ dev_err(dev, "Failed to allocate memory for dma resource "
+ "structure\n");
+ retval = -ENOMEM;
+ goto err_mem;
+ }
+
+ /* Test descriptor alignment */
+ if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
+ dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
+ "required: %p\n", &entry->descriptor);
+ retval = -EINVAL;
+ goto err_align;
+ }
+
+ memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
+
+ if (dest->type == VME_DMA_VME) {
+ entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
+ vme_attr = dest->private;
+ pci_attr = src->private;
+ } else {
+ vme_attr = src->private;
+ pci_attr = dest->private;
+ }
+
+	/* Check that we can fulfill the required attributes */
+	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
+		VME_USER2)) != 0) {
+
+		dev_err(dev, "Unsupported address space\n");
+ retval = -EINVAL;
+ goto err_aspace;
+ }
+
+ if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
+ VME_PROG | VME_DATA)) != 0) {
+
+ dev_err(dev, "Unsupported cycle type\n");
+ retval = -EINVAL;
+ goto err_cycle;
+ }
+
+ /* Check to see if we can fulfill source and destination */
+ if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
+ ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
+
+ dev_err(dev, "Cannot perform transfer with this "
+ "source-destination combination\n");
+ retval = -EINVAL;
+ goto err_direct;
+ }
+
+ /* Setup cycle types */
+ if (vme_attr->cycle & VME_BLT)
+ entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
+
+ /* Setup data width */
+ switch (vme_attr->dwidth) {
+ case VME_D8:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
+ break;
+ case VME_D16:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
+ break;
+ case VME_D32:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
+ break;
+ case VME_D64:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
+ break;
+	default:
+		dev_err(dev, "Invalid data width\n");
+		kfree(entry);
+		return -EINVAL;
+	}
+
+ /* Setup address space */
+ switch (vme_attr->aspace) {
+ case VME_A16:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
+ break;
+ case VME_A24:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
+ break;
+ case VME_A32:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
+ break;
+ case VME_USER1:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
+ break;
+ case VME_USER2:
+ entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
+ break;
+	default:
+		dev_err(dev, "Invalid address space\n");
+		kfree(entry);
+		return -EINVAL;
+	}
+
+ if (vme_attr->cycle & VME_SUPER)
+ entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
+ if (vme_attr->cycle & VME_PROG)
+ entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
+
+ entry->descriptor.dtbc = count;
+ entry->descriptor.dla = pci_attr->address;
+ entry->descriptor.dva = vme_attr->address;
+ entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
+
+ /* Add to list */
+ list_add_tail(&entry->list, &list->entries);
+
+ /* Fill out previous descriptors "Next Address" */
+ if (entry->list.prev != &list->entries) {
+ prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
+ list);
+ /* We need the bus address for the pointer */
+ desc_ptr = virt_to_bus(&entry->descriptor);
+ prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
+ }
+
+ return 0;
+
+err_cycle:
+err_aspace:
+err_direct:
+err_align:
+ kfree(entry);
+err_mem:
+ return retval;
+}
+
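+/* Return 1 when the DMA engine is idle - used as the DMA wake condition */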
+static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
+{
+ u32 tmp;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ tmp = ioread32(bridge->base + DGCS);
+
+ if (tmp & CA91CX42_DGCS_ACT)
+ return 0;
+ else
+ return 1;
+}
+
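+/*
+ * Execute a DMA list: point the controller at the first descriptor, start
+ * the transfer and sleep until the engine reports completion or an error.
+ */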
+static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
+{
+ struct vme_dma_resource *ctrlr;
+ struct ca91cx42_dma_entry *entry;
+ int retval = 0;
+ dma_addr_t bus_addr;
+ u32 val;
+ struct device *dev;
+ struct ca91cx42_driver *bridge;
+
+ ctrlr = list->parent;
+
+ bridge = ctrlr->parent->driver_priv;
+ dev = ctrlr->parent->parent;
+
+ mutex_lock(&ctrlr->mtx);
+
+ if (!(list_empty(&ctrlr->running))) {
+ /*
+ * XXX We have an active DMA transfer and currently haven't
+ * sorted out the mechanism for "pending" DMA transfers.
+ * Return busy.
+ */
+ /* Need to add to pending here */
+ mutex_unlock(&ctrlr->mtx);
+ return -EBUSY;
+ } else {
+ list_add(&list->list, &ctrlr->running);
+ }
+
+ /* Get first bus address and write into registers */
+ entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
+ list);
+
+ bus_addr = virt_to_bus(&entry->descriptor);
+
+ mutex_unlock(&ctrlr->mtx);
+
+ iowrite32(0, bridge->base + DTBC);
+ iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
+
+ /* Start the operation */
+ val = ioread32(bridge->base + DGCS);
+
+ /* XXX Could set VMEbus On and Off Counters here */
+ val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
+
+ val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
+ CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
+ CA91CX42_DGCS_PERR);
+
+ iowrite32(val, bridge->base + DGCS);
+
+ val |= CA91CX42_DGCS_GO;
+
+ iowrite32(val, bridge->base + DGCS);
+
+ wait_event_interruptible(bridge->dma_queue,
+ ca91cx42_dma_busy(ctrlr->parent));
+
+ /*
+ * Read status register, this register is valid until we kick off a
+ * new transfer.
+ */
+ val = ioread32(bridge->base + DGCS);
+
+ if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
+ CA91CX42_DGCS_PERR)) {
+
+ dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
+ val = ioread32(bridge->base + DCTL);
+ }
+
+ /* Remove list from running list */
+ mutex_lock(&ctrlr->mtx);
+ list_del(&list->list);
+ mutex_unlock(&ctrlr->mtx);
+
+ return retval;
+}
+
+static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
+{
+ struct list_head *pos, *temp;
+ struct ca91cx42_dma_entry *entry;
+
+ /* detach and free each entry */
+ list_for_each_safe(pos, temp, &list->entries) {
+ list_del(pos);
+ entry = list_entry(pos, struct ca91cx42_dma_entry, list);
+ kfree(entry);
+ }
+
+ return 0;
+}
+
+/*
+ * All 4 location monitors reside at the same base - this is therefore a
+ * system-wide configuration.
+ *
+ * This does not enable the LM monitor - that should be done when the first
+ * callback is attached and disabled when the last callback is removed.
+ */
+static int ca91cx42_lm_set(struct vme_lm_resource *lm,
+ unsigned long long lm_base, u32 aspace, u32 cycle)
+{
+ u32 temp_base, lm_ctl = 0;
+ int i;
+ struct ca91cx42_driver *bridge;
+ struct device *dev;
+
+ bridge = lm->parent->driver_priv;
+ dev = lm->parent->parent;
+
+ /* Check the alignment of the location monitor */
+ temp_base = (u32)lm_base;
+ if (temp_base & 0xffff) {
+ dev_err(dev, "Location monitor must be aligned to 64KB "
+ "boundary");
+ return -EINVAL;
+ }
+
+ mutex_lock(&lm->mtx);
+
+ /* If we already have a callback attached, we can't move it! */
+ for (i = 0; i < lm->monitors; i++) {
+ if (bridge->lm_callback[i] != NULL) {
+ mutex_unlock(&lm->mtx);
+ dev_err(dev, "Location monitor callback attached, "
+ "can't reset\n");
+ return -EBUSY;
+ }
+ }
+
+ switch (aspace) {
+ case VME_A16:
+ lm_ctl |= CA91CX42_LM_CTL_AS_A16;
+ break;
+ case VME_A24:
+ lm_ctl |= CA91CX42_LM_CTL_AS_A24;
+ break;
+ case VME_A32:
+ lm_ctl |= CA91CX42_LM_CTL_AS_A32;
+ break;
+ default:
+ mutex_unlock(&lm->mtx);
+ dev_err(dev, "Invalid address space\n");
+ return -EINVAL;
+ }
+
+ if (cycle & VME_SUPER)
+ lm_ctl |= CA91CX42_LM_CTL_SUPR;
+ if (cycle & VME_USER)
+ lm_ctl |= CA91CX42_LM_CTL_NPRIV;
+ if (cycle & VME_PROG)
+ lm_ctl |= CA91CX42_LM_CTL_PGM;
+ if (cycle & VME_DATA)
+ lm_ctl |= CA91CX42_LM_CTL_DATA;
+
+ iowrite32(lm_base, bridge->base + LM_BS);
+ iowrite32(lm_ctl, bridge->base + LM_CTL);
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
+/* Get the configuration of the location monitor and return whether it is
+ * enabled or disabled.
+ */
+static int ca91cx42_lm_get(struct vme_lm_resource *lm,
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
+{
+ u32 lm_ctl, enabled = 0;
+ struct ca91cx42_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
+ lm_ctl = ioread32(bridge->base + LM_CTL);
+
+ if (lm_ctl & CA91CX42_LM_CTL_EN)
+ enabled = 1;
+
+ if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
+ *aspace = VME_A16;
+ if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
+ *aspace = VME_A24;
+ if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
+ *aspace = VME_A32;
+
+ *cycle = 0;
+ if (lm_ctl & CA91CX42_LM_CTL_SUPR)
+ *cycle |= VME_SUPER;
+ if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
+ *cycle |= VME_USER;
+ if (lm_ctl & CA91CX42_LM_CTL_PGM)
+ *cycle |= VME_PROG;
+ if (lm_ctl & CA91CX42_LM_CTL_DATA)
+ *cycle |= VME_DATA;
+
+ mutex_unlock(&lm->mtx);
+
+ return enabled;
+}
+
+/*
+ * Attach a callback to a specific location monitor.
+ *
+ * The callback will be passed the number of the monitor that triggered.
+ */
+static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
+ void (*callback)(int))
+{
+ u32 lm_ctl, tmp;
+ struct ca91cx42_driver *bridge;
+ struct device *dev;
+
+ bridge = lm->parent->driver_priv;
+ dev = lm->parent->parent;
+
+ mutex_lock(&lm->mtx);
+
+ /* Ensure that the location monitor is configured - need PGM or DATA */
+ lm_ctl = ioread32(bridge->base + LM_CTL);
+ if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
+ mutex_unlock(&lm->mtx);
+ dev_err(dev, "Location monitor not properly configured\n");
+ return -EINVAL;
+ }
+
+ /* Check that a callback isn't already attached */
+ if (bridge->lm_callback[monitor] != NULL) {
+ mutex_unlock(&lm->mtx);
+ dev_err(dev, "Existing callback attached\n");
+ return -EBUSY;
+ }
+
+ /* Attach callback */
+ bridge->lm_callback[monitor] = callback;
+
+ /* Enable Location Monitor interrupt */
+ tmp = ioread32(bridge->base + LINT_EN);
+ tmp |= CA91CX42_LINT_LM[monitor];
+ iowrite32(tmp, bridge->base + LINT_EN);
+
+ /* Ensure that global Location Monitor Enable set */
+ if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
+ lm_ctl |= CA91CX42_LM_CTL_EN;
+ iowrite32(lm_ctl, bridge->base + LM_CTL);
+ }
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
+/*
+ * Detach a callback function from a specific location monitor.
+ */
+static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
+{
+ u32 tmp;
+ struct ca91cx42_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ /* Disable Location Monitor and ensure previous interrupts are clear */
+ tmp = ioread32(bridge->base + LINT_EN);
+ tmp &= ~CA91CX42_LINT_LM[monitor];
+ iowrite32(tmp, bridge->base + LINT_EN);
+
+ iowrite32(CA91CX42_LINT_LM[monitor],
+ bridge->base + LINT_STAT);
+
+ /* Detach callback */
+ bridge->lm_callback[monitor] = NULL;
+
+ /* If all location monitors disabled, disable global Location Monitor */
+ if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
+ CA91CX42_LINT_LM3)) == 0) {
+ tmp = ioread32(bridge->base + LM_CTL);
+ tmp &= ~CA91CX42_LM_CTL_EN;
+ iowrite32(tmp, bridge->base + LM_CTL);
+ }
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
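+/*
+ * Return the slot this bridge occupies, read from the VCSR_BS register
+ * unless overridden by the geoid module parameter.
+ */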
+static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
+{
+ u32 slot = 0;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ if (!geoid) {
+ slot = ioread32(bridge->base + VCSR_BS);
+ slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
+ } else
+ slot = geoid;
+
+ return (int)slot;
+}
+
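+/* Allocate a coherent DMA buffer on behalf of the VME core */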
+void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
+ dma_addr_t *dma)
+{
+ struct pci_dev *pdev;
+
+ /* Find pci_dev container of dev */
+ pdev = container_of(parent, struct pci_dev, dev);
+
+ return pci_alloc_consistent(pdev, size, dma);
+}
+
+void ca91cx42_free_consistent(struct device *parent, size_t size, void *vaddr,
+ dma_addr_t dma)
+{
+ struct pci_dev *pdev;
+
+ /* Find pci_dev container of dev */
+ pdev = container_of(parent, struct pci_dev, dev);
+
+ pci_free_consistent(pdev, size, vaddr, dma);
+}
+
+static int __init ca91cx42_init(void)
+{
+ return pci_register_driver(&ca91cx42_driver);
+}
+
+/*
+ * Configure CR/CSR space
+ *
+ * Access to the CR/CSR can be configured at power-up. The location of the
+ * CR/CSR registers in the CR/CSR address space is determined by the board's
+ * Auto-ID or Geographic address. This function ensures that the window is
+ * enabled at an offset consistent with the board's geographic address.
+ */
+static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
+ struct pci_dev *pdev)
+{
+ unsigned int crcsr_addr;
+ int tmp, slot;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ slot = ca91cx42_slot_get(ca91cx42_bridge);
+
+ /* Write CSR Base Address if slot ID is supplied as a module param */
+ if (geoid)
+ iowrite32(geoid << 27, bridge->base + VCSR_BS);
+
+ dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
+ if (slot == 0) {
+ dev_err(&pdev->dev, "Slot number is unset, not configuring "
+ "CR/CSR space\n");
+ return -EINVAL;
+ }
+
+ /* Allocate mem for CR/CSR image */
+ bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
+ &bridge->crcsr_bus);
+ if (bridge->crcsr_kernel == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
+ "image\n");
+ return -ENOMEM;
+ }
+
+ memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
+
+ crcsr_addr = slot * (512 * 1024);
+ iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
+
+ tmp = ioread32(bridge->base + VCSR_CTL);
+ tmp |= CA91CX42_VCSR_CTL_EN;
+ iowrite32(tmp, bridge->base + VCSR_CTL);
+
+ return 0;
+}
+
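+/* Disable the CR/CSR window and free the CR/CSR image buffer */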
+static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
+ struct pci_dev *pdev)
+{
+ u32 tmp;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ /* Turn off CR/CSR space */
+ tmp = ioread32(bridge->base + VCSR_CTL);
+ tmp &= ~CA91CX42_VCSR_CTL_EN;
+ iowrite32(tmp, bridge->base + VCSR_CTL);
+
+ /* Free image */
+ iowrite32(0, bridge->base + VCSR_TO);
+
+ pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
+ bridge->crcsr_bus);
+}
+
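+/*
+ * PCI probe: allocate the bridge structures, map the register space, set
+ * up interrupts, create the master, slave, DMA and location monitor
+ * resources and register the bridge with the VME core.
+ */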
+static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int retval, i;
+ u32 data;
+ struct list_head *pos = NULL;
+ struct vme_bridge *ca91cx42_bridge;
+ struct ca91cx42_driver *ca91cx42_device;
+ struct vme_master_resource *master_image;
+ struct vme_slave_resource *slave_image;
+ struct vme_dma_resource *dma_ctrlr;
+ struct vme_lm_resource *lm;
+
+ /* We want to support more than one of each bridge so we need to
+ * dynamically allocate the bridge structure
+ */
+ ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
+
+ if (ca91cx42_bridge == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for device "
+ "structure\n");
+ retval = -ENOMEM;
+ goto err_struct;
+ }
+
+ ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
+
+ if (ca91cx42_device == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for device "
+ "structure\n");
+ retval = -ENOMEM;
+ goto err_driver;
+ }
+
+ ca91cx42_bridge->driver_priv = ca91cx42_device;
+
+ /* Enable the device */
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "Unable to enable device\n");
+ goto err_enable;
+ }
+
+ /* Map Registers */
+ retval = pci_request_regions(pdev, driver_name);
+ if (retval) {
+ dev_err(&pdev->dev, "Unable to reserve resources\n");
+ goto err_resource;
+ }
+
+ /* map registers in BAR 0 */
+ ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ 4096);
+ if (!ca91cx42_device->base) {
+ dev_err(&pdev->dev, "Unable to remap CRG region\n");
+ retval = -EIO;
+ goto err_remap;
+ }
+
+ /* Check to see if the mapping worked out */
+ data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
+ if (data != PCI_VENDOR_ID_TUNDRA) {
+ dev_err(&pdev->dev, "PCI_ID check failed\n");
+ retval = -EIO;
+ goto err_test;
+ }
+
+ /* Initialize wait queues & mutual exclusion flags */
+ init_waitqueue_head(&ca91cx42_device->dma_queue);
+ init_waitqueue_head(&ca91cx42_device->iack_queue);
+ mutex_init(&ca91cx42_device->vme_int);
+ mutex_init(&ca91cx42_device->vme_rmw);
+
+ ca91cx42_bridge->parent = &pdev->dev;
+ strcpy(ca91cx42_bridge->name, driver_name);
+
+ /* Setup IRQ */
+ retval = ca91cx42_irq_init(ca91cx42_bridge);
+ if (retval != 0) {
+ dev_err(&pdev->dev, "Chip Initialization failed.\n");
+ goto err_irq;
+ }
+
+ /* Add master windows to list */
+ INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
+ for (i = 0; i < CA91C142_MAX_MASTER; i++) {
+ master_image = kmalloc(sizeof(struct vme_master_resource),
+ GFP_KERNEL);
+ if (master_image == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "master resource structure\n");
+ retval = -ENOMEM;
+ goto err_master;
+ }
+ master_image->parent = ca91cx42_bridge;
+ spin_lock_init(&master_image->lock);
+ master_image->locked = 0;
+ master_image->number = i;
+ master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
+ VME_CRCSR | VME_USER1 | VME_USER2;
+ master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
+ VME_SUPER | VME_USER | VME_PROG | VME_DATA;
+ master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
+ memset(&master_image->bus_resource, 0,
+ sizeof(struct resource));
+ master_image->kern_base = NULL;
+ list_add_tail(&master_image->list,
+ &ca91cx42_bridge->master_resources);
+ }
+
+ /* Add slave windows to list */
+ INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
+ for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
+ slave_image = kmalloc(sizeof(struct vme_slave_resource),
+ GFP_KERNEL);
+ if (slave_image == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "slave resource structure\n");
+ retval = -ENOMEM;
+ goto err_slave;
+ }
+ slave_image->parent = ca91cx42_bridge;
+ mutex_init(&slave_image->mtx);
+ slave_image->locked = 0;
+ slave_image->number = i;
+ slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
+ VME_USER2;
+
+ /* Only windows 0 and 4 support A16 */
+ if (i == 0 || i == 4)
+ slave_image->address_attr |= VME_A16;
+
+ slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
+ VME_SUPER | VME_USER | VME_PROG | VME_DATA;
+ list_add_tail(&slave_image->list,
+ &ca91cx42_bridge->slave_resources);
+ }
+
+ /* Add dma engines to list */
+ INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
+ for (i = 0; i < CA91C142_MAX_DMA; i++) {
+ dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
+ GFP_KERNEL);
+ if (dma_ctrlr == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "dma resource structure\n");
+ retval = -ENOMEM;
+ goto err_dma;
+ }
+ dma_ctrlr->parent = ca91cx42_bridge;
+ mutex_init(&dma_ctrlr->mtx);
+ dma_ctrlr->locked = 0;
+ dma_ctrlr->number = i;
+ dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
+ VME_DMA_MEM_TO_VME;
+ INIT_LIST_HEAD(&dma_ctrlr->pending);
+ INIT_LIST_HEAD(&dma_ctrlr->running);
+ list_add_tail(&dma_ctrlr->list,
+ &ca91cx42_bridge->dma_resources);
+ }
+
+ /* Add location monitor to list */
+ INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
+ lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
+ if (lm == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "location monitor resource structure\n");
+ retval = -ENOMEM;
+ goto err_lm;
+ }
+ lm->parent = ca91cx42_bridge;
+ mutex_init(&lm->mtx);
+ lm->locked = 0;
+ lm->number = 1;
+ lm->monitors = 4;
+ list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
+
+ ca91cx42_bridge->slave_get = ca91cx42_slave_get;
+ ca91cx42_bridge->slave_set = ca91cx42_slave_set;
+ ca91cx42_bridge->master_get = ca91cx42_master_get;
+ ca91cx42_bridge->master_set = ca91cx42_master_set;
+ ca91cx42_bridge->master_read = ca91cx42_master_read;
+ ca91cx42_bridge->master_write = ca91cx42_master_write;
+ ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
+ ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
+ ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
+ ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
+ ca91cx42_bridge->irq_set = ca91cx42_irq_set;
+ ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
+ ca91cx42_bridge->lm_set = ca91cx42_lm_set;
+ ca91cx42_bridge->lm_get = ca91cx42_lm_get;
+ ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
+ ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
+ ca91cx42_bridge->slot_get = ca91cx42_slot_get;
+ ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
+ ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;
+
+ data = ioread32(ca91cx42_device->base + MISC_CTL);
+ dev_info(&pdev->dev, "Board is%s the VME system controller\n",
+ (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
+ dev_info(&pdev->dev, "Slot ID is %d\n",
+ ca91cx42_slot_get(ca91cx42_bridge));
+
+ if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
+ dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
+
+ /* Need to save ca91cx42_bridge pointer locally in link list for use in
+ * ca91cx42_remove()
+ */
+ retval = vme_register_bridge(ca91cx42_bridge);
+ if (retval != 0) {
+ dev_err(&pdev->dev, "Chip Registration failed.\n");
+ goto err_reg;
+ }
+
+ pci_set_drvdata(pdev, ca91cx42_bridge);
+
+ return 0;
+
+err_reg:
+ ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
+err_lm:
+	/* resources are stored in linked list */
+ list_for_each(pos, &ca91cx42_bridge->lm_resources) {
+ lm = list_entry(pos, struct vme_lm_resource, list);
+ list_del(pos);
+ kfree(lm);
+ }
+err_dma:
+	/* resources are stored in linked list */
+ list_for_each(pos, &ca91cx42_bridge->dma_resources) {
+ dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
+ list_del(pos);
+ kfree(dma_ctrlr);
+ }
+err_slave:
+	/* resources are stored in linked list */
+ list_for_each(pos, &ca91cx42_bridge->slave_resources) {
+ slave_image = list_entry(pos, struct vme_slave_resource, list);
+ list_del(pos);
+ kfree(slave_image);
+ }
+err_master:
+	/* resources are stored in linked list */
+ list_for_each(pos, &ca91cx42_bridge->master_resources) {
+ master_image = list_entry(pos, struct vme_master_resource,
+ list);
+ list_del(pos);
+ kfree(master_image);
+ }
+
+ ca91cx42_irq_exit(ca91cx42_device, pdev);
+err_irq:
+err_test:
+ iounmap(ca91cx42_device->base);
+err_remap:
+ pci_release_regions(pdev);
+err_resource:
+ pci_disable_device(pdev);
+err_enable:
+ kfree(ca91cx42_device);
+err_driver:
+ kfree(ca91cx42_bridge);
+err_struct:
+ return retval;
+}
+
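+/*
+ * Disable all windows and interrupts, unregister the bridge and release
+ * everything allocated in ca91cx42_probe().
+ */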
+static void ca91cx42_remove(struct pci_dev *pdev)
+{
+ struct list_head *pos = NULL;
+ struct vme_master_resource *master_image;
+ struct vme_slave_resource *slave_image;
+ struct vme_dma_resource *dma_ctrlr;
+ struct vme_lm_resource *lm;
+ struct ca91cx42_driver *bridge;
+ struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
+
+ bridge = ca91cx42_bridge->driver_priv;
+
+ /* Turn off Ints */
+ iowrite32(0, bridge->base + LINT_EN);
+
+ /* Turn off the windows */
+ iowrite32(0x00800000, bridge->base + LSI0_CTL);
+ iowrite32(0x00800000, bridge->base + LSI1_CTL);
+ iowrite32(0x00800000, bridge->base + LSI2_CTL);
+ iowrite32(0x00800000, bridge->base + LSI3_CTL);
+ iowrite32(0x00800000, bridge->base + LSI4_CTL);
+ iowrite32(0x00800000, bridge->base + LSI5_CTL);
+ iowrite32(0x00800000, bridge->base + LSI6_CTL);
+ iowrite32(0x00800000, bridge->base + LSI7_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI0_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI1_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI2_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI3_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI4_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI5_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI6_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI7_CTL);
+
+ vme_unregister_bridge(ca91cx42_bridge);
+
+ ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
+
+	/* resources are stored in linked list */
+ list_for_each(pos, &ca91cx42_bridge->lm_resources) {
+ lm = list_entry(pos, struct vme_lm_resource, list);
+ list_del(pos);
+ kfree(lm);
+ }
+
+	/* resources are stored in linked list */
+ list_for_each(pos, &ca91cx42_bridge->dma_resources) {
+ dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
+ list_del(pos);
+ kfree(dma_ctrlr);
+ }
+
+	/* resources are stored in linked list */
+ list_for_each(pos, &ca91cx42_bridge->slave_resources) {
+ slave_image = list_entry(pos, struct vme_slave_resource, list);
+ list_del(pos);
+ kfree(slave_image);
+ }
+
+	/* resources are stored in linked list */
+ list_for_each(pos, &ca91cx42_bridge->master_resources) {
+ master_image = list_entry(pos, struct vme_master_resource,
+ list);
+ list_del(pos);
+ kfree(master_image);
+ }
+
+ ca91cx42_irq_exit(bridge, pdev);
+
+ iounmap(bridge->base);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+
+ kfree(ca91cx42_bridge);
+}
+
+static void __exit ca91cx42_exit(void)
+{
+ pci_unregister_driver(&ca91cx42_driver);
+}
+
+MODULE_PARM_DESC(geoid, "Override geographical addressing");
+module_param(geoid, int, 0);
+
+MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
+MODULE_LICENSE("GPL");
+
+module_init(ca91cx42_init);
+module_exit(ca91cx42_exit);
--- /dev/null
+/*
+ * ca91c042.h
+ *
+ * Support for the Tundra Universe I and Universe II VME bridge chips
+ *
+ * Author: Tom Armistead
+ * Updated by Ajit Prem
+ * Copyright 2004 Motorola Inc.
+ *
+ * Further updated by Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * Derived from ca91c042.h by Michael Wyrick
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _CA91CX42_H
+#define _CA91CX42_H
+
+#ifndef PCI_VENDOR_ID_TUNDRA
+#define PCI_VENDOR_ID_TUNDRA 0x10e3
+#endif
+
+#ifndef PCI_DEVICE_ID_TUNDRA_CA91C142
+#define PCI_DEVICE_ID_TUNDRA_CA91C142 0x0000
+#endif
+
+/*
+ * Define the number of each that the CA91C142 supports.
+ */
+#define CA91C142_MAX_MASTER 8 /* Max Master Windows */
+#define CA91C142_MAX_SLAVE 8 /* Max Slave Windows */
+#define CA91C142_MAX_DMA 1 /* Max DMA Controllers */
+#define CA91C142_MAX_MAILBOX 4 /* Max Mail Box registers */
+
+/* Structure used to hold driver specific information */
+struct ca91cx42_driver {
+ void __iomem *base; /* Base Address of device registers */
+ wait_queue_head_t dma_queue;
+ wait_queue_head_t iack_queue;
+ wait_queue_head_t mbox_queue;
+ void (*lm_callback[4])(int); /* Called in interrupt handler */
+ void *crcsr_kernel;
+ dma_addr_t crcsr_bus;
+ struct mutex vme_rmw; /* Only one RMW cycle at a time */
+ struct mutex vme_int; /*
+ * Only one VME interrupt can be
+ * generated at a time, provide locking
+ */
+};
+
+/* See Page 2-77 in the Universe User Manual */
+struct ca91cx42_dma_descriptor {
+ unsigned int dctl; /* DMA Control */
+ unsigned int dtbc; /* Transfer Byte Count */
+ unsigned int dla; /* PCI Address */
+ unsigned int res1; /* Reserved */
+ unsigned int dva; /* Vme Address */
+ unsigned int res2; /* Reserved */
+	unsigned int dcpp;	/* Pointer to next command packet */
+ unsigned int res3; /* Reserved */
+};
+
+struct ca91cx42_dma_entry {
+ struct ca91cx42_dma_descriptor descriptor;
+ struct list_head list;
+};
+
+/* Universe Register Offsets */
+/* general PCI configuration registers */
+#define CA91CX42_PCI_ID 0x000
+#define CA91CX42_PCI_CSR 0x004
+#define CA91CX42_PCI_CLASS 0x008
+#define CA91CX42_PCI_MISC0 0x00C
+#define CA91CX42_PCI_BS 0x010
+#define CA91CX42_PCI_MISC1 0x03C
+
+#define LSI0_CTL 0x0100
+#define LSI0_BS 0x0104
+#define LSI0_BD 0x0108
+#define LSI0_TO 0x010C
+
+#define LSI1_CTL 0x0114
+#define LSI1_BS 0x0118
+#define LSI1_BD 0x011C
+#define LSI1_TO 0x0120
+
+#define LSI2_CTL 0x0128
+#define LSI2_BS 0x012C
+#define LSI2_BD 0x0130
+#define LSI2_TO 0x0134
+
+#define LSI3_CTL 0x013C
+#define LSI3_BS 0x0140
+#define LSI3_BD 0x0144
+#define LSI3_TO 0x0148
+
+#define LSI4_CTL 0x01A0
+#define LSI4_BS 0x01A4
+#define LSI4_BD 0x01A8
+#define LSI4_TO 0x01AC
+
+#define LSI5_CTL 0x01B4
+#define LSI5_BS 0x01B8
+#define LSI5_BD 0x01BC
+#define LSI5_TO 0x01C0
+
+#define LSI6_CTL 0x01C8
+#define LSI6_BS 0x01CC
+#define LSI6_BD 0x01D0
+#define LSI6_TO 0x01D4
+
+#define LSI7_CTL 0x01DC
+#define LSI7_BS 0x01E0
+#define LSI7_BD 0x01E4
+#define LSI7_TO 0x01E8
+
+static const int CA91CX42_LSI_CTL[] = { LSI0_CTL, LSI1_CTL, LSI2_CTL, LSI3_CTL,
+ LSI4_CTL, LSI5_CTL, LSI6_CTL, LSI7_CTL };
+
+static const int CA91CX42_LSI_BS[] = { LSI0_BS, LSI1_BS, LSI2_BS, LSI3_BS,
+ LSI4_BS, LSI5_BS, LSI6_BS, LSI7_BS };
+
+static const int CA91CX42_LSI_BD[] = { LSI0_BD, LSI1_BD, LSI2_BD, LSI3_BD,
+ LSI4_BD, LSI5_BD, LSI6_BD, LSI7_BD };
+
+static const int CA91CX42_LSI_TO[] = { LSI0_TO, LSI1_TO, LSI2_TO, LSI3_TO,
+ LSI4_TO, LSI5_TO, LSI6_TO, LSI7_TO };
+
+#define SCYC_CTL 0x0170
+#define SCYC_ADDR 0x0174
+#define SCYC_EN 0x0178
+#define SCYC_CMP 0x017C
+#define SCYC_SWP 0x0180
+#define LMISC 0x0184
+#define SLSI 0x0188
+#define L_CMDERR 0x018C
+#define LAERR 0x0190
+
+#define DCTL 0x0200
+#define DTBC 0x0204
+#define DLA 0x0208
+#define DVA 0x0210
+#define DCPP 0x0218
+#define DGCS 0x0220
+#define D_LLUE 0x0224
+
+#define LINT_EN 0x0300
+#define LINT_STAT 0x0304
+#define LINT_MAP0 0x0308
+#define LINT_MAP1 0x030C
+#define VINT_EN 0x0310
+#define VINT_STAT 0x0314
+#define VINT_MAP0 0x0318
+#define VINT_MAP1 0x031C
+#define STATID 0x0320
+
+#define V1_STATID 0x0324
+#define V2_STATID 0x0328
+#define V3_STATID 0x032C
+#define V4_STATID 0x0330
+#define V5_STATID 0x0334
+#define V6_STATID 0x0338
+#define V7_STATID 0x033C
+
+static const int CA91CX42_V_STATID[8] = { 0, V1_STATID, V2_STATID, V3_STATID,
+ V4_STATID, V5_STATID, V6_STATID,
+ V7_STATID };
+
+#define LINT_MAP2 0x0340
+#define VINT_MAP2 0x0344
+
+#define MBOX0 0x0348
+#define MBOX1 0x034C
+#define MBOX2 0x0350
+#define MBOX3 0x0354
+#define SEMA0 0x0358
+#define SEMA1 0x035C
+
+#define MAST_CTL 0x0400
+#define MISC_CTL 0x0404
+#define MISC_STAT 0x0408
+#define USER_AM 0x040C
+
+#define VSI0_CTL 0x0F00
+#define VSI0_BS 0x0F04
+#define VSI0_BD 0x0F08
+#define VSI0_TO 0x0F0C
+
+#define VSI1_CTL 0x0F14
+#define VSI1_BS 0x0F18
+#define VSI1_BD 0x0F1C
+#define VSI1_TO 0x0F20
+
+#define VSI2_CTL 0x0F28
+#define VSI2_BS 0x0F2C
+#define VSI2_BD 0x0F30
+#define VSI2_TO 0x0F34
+
+#define VSI3_CTL 0x0F3C
+#define VSI3_BS 0x0F40
+#define VSI3_BD 0x0F44
+#define VSI3_TO 0x0F48
+
+#define LM_CTL 0x0F64
+#define LM_BS 0x0F68
+
+#define VRAI_CTL 0x0F70
+
+#define VRAI_BS 0x0F74
+#define VCSR_CTL 0x0F80
+#define VCSR_TO 0x0F84
+#define V_AMERR 0x0F88
+#define VAERR 0x0F8C
+
+#define VSI4_CTL 0x0F90
+#define VSI4_BS 0x0F94
+#define VSI4_BD 0x0F98
+#define VSI4_TO 0x0F9C
+
+#define VSI5_CTL 0x0FA4
+#define VSI5_BS 0x0FA8
+#define VSI5_BD 0x0FAC
+#define VSI5_TO 0x0FB0
+
+#define VSI6_CTL 0x0FB8
+#define VSI6_BS 0x0FBC
+#define VSI6_BD 0x0FC0
+#define VSI6_TO 0x0FC4
+
+#define VSI7_CTL 0x0FCC
+#define VSI7_BS 0x0FD0
+#define VSI7_BD 0x0FD4
+#define VSI7_TO 0x0FD8
+
+static const int CA91CX42_VSI_CTL[] = { VSI0_CTL, VSI1_CTL, VSI2_CTL, VSI3_CTL,
+ VSI4_CTL, VSI5_CTL, VSI6_CTL, VSI7_CTL };
+
+static const int CA91CX42_VSI_BS[] = { VSI0_BS, VSI1_BS, VSI2_BS, VSI3_BS,
+ VSI4_BS, VSI5_BS, VSI6_BS, VSI7_BS };
+
+static const int CA91CX42_VSI_BD[] = { VSI0_BD, VSI1_BD, VSI2_BD, VSI3_BD,
+ VSI4_BD, VSI5_BD, VSI6_BD, VSI7_BD };
+
+static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
+ VSI4_TO, VSI5_TO, VSI6_TO, VSI7_TO };
+
+#define VCSR_CLR 0x0FF4
+#define VCSR_SET 0x0FF8
+#define VCSR_BS 0x0FFC
+
+/*
+ * PCI Class Register
+ * offset 008
+ */
+#define CA91CX42_BM_PCI_CLASS_BASE 0xFF000000
+#define CA91CX42_OF_PCI_CLASS_BASE 24
+#define CA91CX42_BM_PCI_CLASS_SUB 0x00FF0000
+#define CA91CX42_OF_PCI_CLASS_SUB 16
+#define CA91CX42_BM_PCI_CLASS_PROG 0x0000FF00
+#define CA91CX42_OF_PCI_CLASS_PROG 8
+#define CA91CX42_BM_PCI_CLASS_RID 0x000000FF
+#define CA91CX42_OF_PCI_CLASS_RID 0
+
+#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_I 0
+#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_II 1
+
+/*
+ * PCI Misc Register
+ * offset 00C
+ */
+#define CA91CX42_BM_PCI_MISC0_BISTC 0x80000000
+#define CA91CX42_BM_PCI_MISC0_SBIST 0x60000000
+#define CA91CX42_BM_PCI_MISC0_CCODE 0x0F000000
+#define CA91CX42_BM_PCI_MISC0_MFUNCT 0x00800000
+#define CA91CX42_BM_PCI_MISC0_LAYOUT 0x007F0000
+#define CA91CX42_BM_PCI_MISC0_LTIMER 0x0000FF00
+#define CA91CX42_OF_PCI_MISC0_LTIMER 8
+
+
+/*
+ * LSI Control Register
+ * offset 100
+ */
+#define CA91CX42_LSI_CTL_EN (1<<31)
+#define CA91CX42_LSI_CTL_PWEN (1<<30)
+
+#define CA91CX42_LSI_CTL_VDW_M (3<<22)
+#define CA91CX42_LSI_CTL_VDW_D8 0
+#define CA91CX42_LSI_CTL_VDW_D16 (1<<22)
+#define CA91CX42_LSI_CTL_VDW_D32 (1<<23)
+#define CA91CX42_LSI_CTL_VDW_D64 (3<<22)
+
+#define CA91CX42_LSI_CTL_VAS_M (7<<16)
+#define CA91CX42_LSI_CTL_VAS_A16 0
+#define CA91CX42_LSI_CTL_VAS_A24 (1<<16)
+#define CA91CX42_LSI_CTL_VAS_A32 (1<<17)
+#define CA91CX42_LSI_CTL_VAS_CRCSR (5<<16)
+#define CA91CX42_LSI_CTL_VAS_USER1 (3<<17)
+#define CA91CX42_LSI_CTL_VAS_USER2 (7<<16)
+
+#define CA91CX42_LSI_CTL_PGM_M (1<<14)
+#define CA91CX42_LSI_CTL_PGM_DATA 0
+#define CA91CX42_LSI_CTL_PGM_PGM (1<<14)
+
+#define CA91CX42_LSI_CTL_SUPER_M (1<<12)
+#define CA91CX42_LSI_CTL_SUPER_NPRIV 0
+#define CA91CX42_LSI_CTL_SUPER_SUPR (1<<12)
+
+#define CA91CX42_LSI_CTL_VCT_M (1<<8)
+#define CA91CX42_LSI_CTL_VCT_BLT (1<<8)
+#define CA91CX42_LSI_CTL_VCT_MBLT (1<<8)
+#define CA91CX42_LSI_CTL_LAS (1<<0)
+
+/*
+ * SCYC_CTL Register
+ * offset 178
+ */
+#define CA91CX42_SCYC_CTL_LAS_PCIMEM 0
+#define CA91CX42_SCYC_CTL_LAS_PCIIO (1<<2)
+
+#define CA91CX42_SCYC_CTL_CYC_M (3<<0)
+#define CA91CX42_SCYC_CTL_CYC_RMW (1<<0)
+#define CA91CX42_SCYC_CTL_CYC_ADOH (1<<1)
+
+/*
+ * LMISC Register
+ * offset 184
+ */
+#define CA91CX42_BM_LMISC_CRT 0xF0000000
+#define CA91CX42_OF_LMISC_CRT 28
+#define CA91CX42_BM_LMISC_CWT 0x0F000000
+#define CA91CX42_OF_LMISC_CWT 24
+
+/*
+ * SLSI Register
+ * offset 188
+ */
+#define CA91CX42_BM_SLSI_EN 0x80000000
+#define CA91CX42_BM_SLSI_PWEN 0x40000000
+#define CA91CX42_BM_SLSI_VDW 0x00F00000
+#define CA91CX42_OF_SLSI_VDW 20
+#define CA91CX42_BM_SLSI_PGM 0x0000F000
+#define CA91CX42_OF_SLSI_PGM 12
+#define CA91CX42_BM_SLSI_SUPER 0x00000F00
+#define CA91CX42_OF_SLSI_SUPER 8
+#define CA91CX42_BM_SLSI_BS 0x000000F6
+#define CA91CX42_OF_SLSI_BS 2
+#define CA91CX42_BM_SLSI_LAS 0x00000003
+#define CA91CX42_OF_SLSI_LAS 0
+#define CA91CX42_BM_SLSI_RESERVED 0x3F0F0000
+
+/*
+ * DCTL Register
+ * offset 200
+ */
+#define CA91CX42_DCTL_L2V (1<<31)
+#define CA91CX42_DCTL_VDW_M (3<<22)
+#define CA91CX42_DCTL_VDW_D8 0
+#define CA91CX42_DCTL_VDW_D16 (1<<22)
+#define CA91CX42_DCTL_VDW_D32 (1<<23)
+#define CA91CX42_DCTL_VDW_D64 (3<<22)
+
+#define CA91CX42_DCTL_VAS_M (7<<16)
+#define CA91CX42_DCTL_VAS_A16 0
+#define CA91CX42_DCTL_VAS_A24 (1<<16)
+#define CA91CX42_DCTL_VAS_A32 (1<<17)
+#define CA91CX42_DCTL_VAS_USER1 (3<<17)
+#define CA91CX42_DCTL_VAS_USER2 (7<<16)
+
+#define CA91CX42_DCTL_PGM_M (1<<14)
+#define CA91CX42_DCTL_PGM_DATA 0
+#define CA91CX42_DCTL_PGM_PGM (1<<14)
+
+#define CA91CX42_DCTL_SUPER_M (1<<12)
+#define CA91CX42_DCTL_SUPER_NPRIV 0
+#define CA91CX42_DCTL_SUPER_SUPR (1<<12)
+
+#define CA91CX42_DCTL_VCT_M (1<<8)
+#define CA91CX42_DCTL_VCT_BLT (1<<8)
+#define CA91CX42_DCTL_LD64EN (1<<7)
+
+/*
+ * DCPP Register
+ * offset 218
+ */
+#define CA91CX42_DCPP_M 0xf
+#define CA91CX42_DCPP_NULL (1<<0)
+
+/*
+ * DMA General Control/Status Register (DGCS)
+ * offset 220
+ */
+#define CA91CX42_DGCS_GO (1<<31)
+#define CA91CX42_DGCS_STOP_REQ (1<<30)
+#define CA91CX42_DGCS_HALT_REQ (1<<29)
+#define CA91CX42_DGCS_CHAIN (1<<27)
+
+#define CA91CX42_DGCS_VON_M (7<<20)
+
+#define CA91CX42_DGCS_VOFF_M (0xf<<16)
+
+#define CA91CX42_DGCS_ACT (1<<15)
+#define CA91CX42_DGCS_STOP (1<<14)
+#define CA91CX42_DGCS_HALT (1<<13)
+#define CA91CX42_DGCS_DONE (1<<11)
+#define CA91CX42_DGCS_LERR (1<<10)
+#define CA91CX42_DGCS_VERR (1<<9)
+#define CA91CX42_DGCS_PERR (1<<8)
+#define CA91CX42_DGCS_INT_STOP (1<<6)
+#define CA91CX42_DGCS_INT_HALT (1<<5)
+#define CA91CX42_DGCS_INT_DONE (1<<3)
+#define CA91CX42_DGCS_INT_LERR (1<<2)
+#define CA91CX42_DGCS_INT_VERR (1<<1)
+#define CA91CX42_DGCS_INT_PERR (1<<0)
+
+/*
+ * PCI Interrupt Enable Register
+ * offset 300
+ */
+#define CA91CX42_LINT_LM3 0x00800000
+#define CA91CX42_LINT_LM2 0x00400000
+#define CA91CX42_LINT_LM1 0x00200000
+#define CA91CX42_LINT_LM0 0x00100000
+#define CA91CX42_LINT_MBOX3 0x00080000
+#define CA91CX42_LINT_MBOX2 0x00040000
+#define CA91CX42_LINT_MBOX1 0x00020000
+#define CA91CX42_LINT_MBOX0 0x00010000
+#define CA91CX42_LINT_ACFAIL 0x00008000
+#define CA91CX42_LINT_SYSFAIL 0x00004000
+#define CA91CX42_LINT_SW_INT 0x00002000
+#define CA91CX42_LINT_SW_IACK 0x00001000
+
+#define CA91CX42_LINT_VERR 0x00000400
+#define CA91CX42_LINT_LERR 0x00000200
+#define CA91CX42_LINT_DMA 0x00000100
+#define CA91CX42_LINT_VIRQ7 0x00000080
+#define CA91CX42_LINT_VIRQ6 0x00000040
+#define CA91CX42_LINT_VIRQ5 0x00000020
+#define CA91CX42_LINT_VIRQ4 0x00000010
+#define CA91CX42_LINT_VIRQ3 0x00000008
+#define CA91CX42_LINT_VIRQ2 0x00000004
+#define CA91CX42_LINT_VIRQ1 0x00000002
+#define CA91CX42_LINT_VOWN 0x00000001
+
+static const int CA91CX42_LINT_VIRQ[] = { 0, CA91CX42_LINT_VIRQ1,
+ CA91CX42_LINT_VIRQ2, CA91CX42_LINT_VIRQ3,
+ CA91CX42_LINT_VIRQ4, CA91CX42_LINT_VIRQ5,
+ CA91CX42_LINT_VIRQ6, CA91CX42_LINT_VIRQ7 };
+
+#define CA91CX42_LINT_MBOX 0x000F0000
+
+static const int CA91CX42_LINT_LM[] = { CA91CX42_LINT_LM0, CA91CX42_LINT_LM1,
+ CA91CX42_LINT_LM2, CA91CX42_LINT_LM3 };
+
+/*
+ * MAST_CTL Register
+ * offset 400
+ */
+#define CA91CX42_BM_MAST_CTL_MAXRTRY 0xF0000000
+#define CA91CX42_OF_MAST_CTL_MAXRTRY 28
+#define CA91CX42_BM_MAST_CTL_PWON 0x0F000000
+#define CA91CX42_OF_MAST_CTL_PWON 24
+#define CA91CX42_BM_MAST_CTL_VRL 0x00C00000
+#define CA91CX42_OF_MAST_CTL_VRL 22
+#define CA91CX42_BM_MAST_CTL_VRM 0x00200000
+#define CA91CX42_BM_MAST_CTL_VREL 0x00100000
+#define CA91CX42_BM_MAST_CTL_VOWN 0x00080000
+#define CA91CX42_BM_MAST_CTL_VOWN_ACK 0x00040000
+#define CA91CX42_BM_MAST_CTL_PABS 0x00001000
+#define CA91CX42_BM_MAST_CTL_BUS_NO 0x0000000F
+#define CA91CX42_OF_MAST_CTL_BUS_NO 0
+
+/*
+ * MISC_CTL Register
+ * offset 404
+ */
+#define CA91CX42_MISC_CTL_VBTO 0xF0000000
+#define CA91CX42_MISC_CTL_VARB 0x04000000
+#define CA91CX42_MISC_CTL_VARBTO 0x03000000
+#define CA91CX42_MISC_CTL_SW_LRST 0x00800000
+#define CA91CX42_MISC_CTL_SW_SRST 0x00400000
+#define CA91CX42_MISC_CTL_BI 0x00100000
+#define CA91CX42_MISC_CTL_ENGBI 0x00080000
+#define CA91CX42_MISC_CTL_RESCIND 0x00040000
+#define CA91CX42_MISC_CTL_SYSCON 0x00020000
+#define CA91CX42_MISC_CTL_V64AUTO 0x00010000
+#define CA91CX42_MISC_CTL_RESERVED 0x0820FFFF
+
+#define CA91CX42_OF_MISC_CTL_VARBTO 24
+#define CA91CX42_OF_MISC_CTL_VBTO 28
+
+/*
+ * MISC_STAT Register
+ * offset 408
+ */
+#define CA91CX42_BM_MISC_STAT_ENDIAN 0x80000000
+#define CA91CX42_BM_MISC_STAT_LCLSIZE 0x40000000
+#define CA91CX42_BM_MISC_STAT_DY4AUTO 0x08000000
+#define CA91CX42_BM_MISC_STAT_MYBBSY 0x00200000
+#define CA91CX42_BM_MISC_STAT_DY4DONE 0x00080000
+#define CA91CX42_BM_MISC_STAT_TXFE 0x00040000
+#define CA91CX42_BM_MISC_STAT_RXFE 0x00020000
+#define CA91CX42_BM_MISC_STAT_DY4AUTOID 0x0000FF00
+#define CA91CX42_OF_MISC_STAT_DY4AUTOID 8
+
+/*
+ * VSI Control Register
+ * offset F00
+ */
+#define CA91CX42_VSI_CTL_EN (1<<31)
+#define CA91CX42_VSI_CTL_PWEN (1<<30)
+#define CA91CX42_VSI_CTL_PREN (1<<29)
+
+#define CA91CX42_VSI_CTL_PGM_M (3<<22)
+#define CA91CX42_VSI_CTL_PGM_DATA (1<<22)
+#define CA91CX42_VSI_CTL_PGM_PGM (1<<23)
+
+#define CA91CX42_VSI_CTL_SUPER_M (3<<20)
+#define CA91CX42_VSI_CTL_SUPER_NPRIV (1<<20)
+#define CA91CX42_VSI_CTL_SUPER_SUPR (1<<21)
+
+#define CA91CX42_VSI_CTL_VAS_M (7<<16)
+#define CA91CX42_VSI_CTL_VAS_A16 0
+#define CA91CX42_VSI_CTL_VAS_A24 (1<<16)
+#define CA91CX42_VSI_CTL_VAS_A32 (1<<17)
+#define CA91CX42_VSI_CTL_VAS_USER1 (3<<17)
+#define CA91CX42_VSI_CTL_VAS_USER2 (7<<16)
+
+#define CA91CX42_VSI_CTL_LD64EN (1<<7)
+#define CA91CX42_VSI_CTL_LLRMW (1<<6)
+
+#define CA91CX42_VSI_CTL_LAS_M (3<<0)
+#define CA91CX42_VSI_CTL_LAS_PCI_MS 0
+#define CA91CX42_VSI_CTL_LAS_PCI_IO (1<<0)
+#define CA91CX42_VSI_CTL_LAS_PCI_CONF (1<<1)
+
+/* LM_CTL Register
+ * offset F64
+ */
+#define CA91CX42_LM_CTL_EN (1<<31)
+#define CA91CX42_LM_CTL_PGM (1<<23)
+#define CA91CX42_LM_CTL_DATA (1<<22)
+#define CA91CX42_LM_CTL_SUPR (1<<21)
+#define CA91CX42_LM_CTL_NPRIV (1<<20)
+#define CA91CX42_LM_CTL_AS_M (5<<16)
+#define CA91CX42_LM_CTL_AS_A16 0
+#define CA91CX42_LM_CTL_AS_A24 (1<<16)
+#define CA91CX42_LM_CTL_AS_A32 (1<<17)
+
+/*
+ * VRAI_CTL Register
+ * offset F70
+ */
+#define CA91CX42_BM_VRAI_CTL_EN 0x80000000
+#define CA91CX42_BM_VRAI_CTL_PGM 0x00C00000
+#define CA91CX42_OF_VRAI_CTL_PGM 22
+#define CA91CX42_BM_VRAI_CTL_SUPER 0x00300000
+#define CA91CX42_OF_VRAI_CTL_SUPER 20
+#define CA91CX42_BM_VRAI_CTL_VAS 0x00030000
+#define CA91CX42_OF_VRAI_CTL_VAS 16
+
+/* VCSR_CTL Register
+ * offset F80
+ */
+#define CA91CX42_VCSR_CTL_EN (1<<31)
+
+#define CA91CX42_VCSR_CTL_LAS_M (3<<0)
+#define CA91CX42_VCSR_CTL_LAS_PCI_MS 0
+#define CA91CX42_VCSR_CTL_LAS_PCI_IO (1<<0)
+#define CA91CX42_VCSR_CTL_LAS_PCI_CONF (1<<1)
+
+/* VCSR_BS Register
+ * offset FFC
+ */
+#define CA91CX42_VCSR_BS_SLOT_M (0x1F<<27)
+
+#endif /* _CA91CX42_H */
--- /dev/null
+/*
+ * Support for the Tundra TSI148 VME-PCI Bridge Chip
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * Based on work by Tom Armistead and Ajit Prem
+ * Copyright 2004 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/byteorder/generic.h>
+#include <linux/vme.h>
+
+#include "../vme_bridge.h"
+#include "vme_tsi148.h"
+
+static int __init tsi148_init(void);
+static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
+static void tsi148_remove(struct pci_dev *);
+static void __exit tsi148_exit(void);
+
+
+/* Module parameters */
+static bool err_chk;
+static int geoid;
+
+static const char driver_name[] = "vme_tsi148";
+
+static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
+ { },
+};
+
+static struct pci_driver tsi148_driver = {
+ .name = driver_name,
+ .id_table = tsi148_ids,
+ .probe = tsi148_probe,
+ .remove = tsi148_remove,
+};
+
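+/*
+ * The TSI148 holds 64-bit addresses and counts in pairs of 32-bit
+ * registers; these helpers combine and split such pairs.
+ */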
+static void reg_join(unsigned int high, unsigned int low,
+ unsigned long long *variable)
+{
+ *variable = (unsigned long long)high << 32;
+ *variable |= (unsigned long long)low;
+}
+
+static void reg_split(unsigned long long variable, unsigned int *high,
+ unsigned int *low)
+{
+ *low = (unsigned int)variable & 0xFFFFFFFF;
+ *high = (unsigned int)(variable >> 32);
+}
+
+/*
+ * Wakes up DMA queue.
+ */
+static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
+ int channel_mask)
+{
+ u32 serviced = 0;
+
+ if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
+ wake_up(&bridge->dma_queue[0]);
+ serviced |= TSI148_LCSR_INTC_DMA0C;
+ }
+ if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
+ wake_up(&bridge->dma_queue[1]);
+ serviced |= TSI148_LCSR_INTC_DMA1C;
+ }
+
+ return serviced;
+}
+
+/*
+ * Call the attached callback for each location monitor that has triggered.
+ */
+static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
+{
+ int i;
+ u32 serviced = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (stat & TSI148_LCSR_INTS_LMS[i]) {
+ /* We only enable interrupts if the callback is set */
+ bridge->lm_callback[i](i);
+ serviced |= TSI148_LCSR_INTC_LMC[i];
+ }
+ }
+
+ return serviced;
+}
+
+/*
+ * Wake up mail box queue.
+ *
+ * XXX This functionality is not exposed up through the API.
+ */
+static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
+{
+ int i;
+ u32 val;
+ u32 serviced = 0;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ for (i = 0; i < 4; i++) {
+ if (stat & TSI148_LCSR_INTS_MBS[i]) {
+ val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
+ dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
+ ": 0x%x\n", i, val);
+ serviced |= TSI148_LCSR_INTC_MBC[i];
+ }
+ }
+
+ return serviced;
+}
+
+/*
+ * Display error & status message when PERR (PCI) exception interrupt occurs.
+ */
+static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
+{
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
+ "attributes: %08x\n",
+ ioread32be(bridge->base + TSI148_LCSR_EDPAU),
+ ioread32be(bridge->base + TSI148_LCSR_EDPAL),
+ ioread32be(bridge->base + TSI148_LCSR_EDPAT));
+
+ dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
+ "completion reg: %08x\n",
+ ioread32be(bridge->base + TSI148_LCSR_EDPXA),
+ ioread32be(bridge->base + TSI148_LCSR_EDPXS));
+
+ iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
+
+ return TSI148_LCSR_INTC_PERRC;
+}
+
+/*
+ * Save address and status when VME error interrupt occurs.
+ */
+static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
+{
+ unsigned int error_addr_high, error_addr_low;
+ unsigned long long error_addr;
+ u32 error_attrib;
+ struct vme_bus_error *error;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
+ error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
+ error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
+
+ reg_join(error_addr_high, error_addr_low, &error_addr);
+
+ /* Check for exception register overflow (we have lost error data) */
+ if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
+ dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
+ "Occurred\n");
+ }
+
+ error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
+ if (error) {
+ error->address = error_addr;
+ error->attributes = error_attrib;
+ list_add_tail(&error->list, &tsi148_bridge->vme_errors);
+ } else {
+ dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
+ "VMEbus Error reporting\n");
+ dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
+ "0x%llx, attributes: %08x\n", error_addr, error_attrib);
+ }
+
+ /* Clear Status */
+ iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
+
+ return TSI148_LCSR_INTC_VERRC;
+}
+
+/*
+ * Wake up IACK queue.
+ */
+static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
+{
+ wake_up(&bridge->iack_queue);
+
+ return TSI148_LCSR_INTC_IACKC;
+}
+
+/*
+ * Call the VME bus interrupt callback if provided.
+ */
+static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
+ u32 stat)
+{
+ int vec, i, serviced = 0;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ for (i = 7; i > 0; i--) {
+ if (stat & (1 << i)) {
+ /*
+ * Note: Even though the registers are defined as
+ * 32-bits in the spec, we only want to issue 8-bit
+ * IACK cycles on the bus, read from offset 3.
+ */
+ vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
+
+ vme_irq_handler(tsi148_bridge, i, vec);
+
+ serviced |= (1 << i);
+ }
+ }
+
+ return serviced;
+}
+
+/*
+ * Top level interrupt handler. Clears appropriate interrupt status bits and
+ * then calls appropriate sub handler(s).
+ */
+static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
+{
+ u32 stat, enable, serviced = 0;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ tsi148_bridge = ptr;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ /* Determine which interrupts are unmasked and set */
+ enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
+ stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
+
+ /* Only look at unmasked interrupts */
+ stat &= enable;
+
+ if (unlikely(!stat))
+ return IRQ_NONE;
+
+ /* Call subhandlers as appropriate */
+ /* DMA irqs */
+ if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
+ serviced |= tsi148_DMA_irqhandler(bridge, stat);
+
+ /* Location monitor irqs */
+ if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
+ TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
+ serviced |= tsi148_LM_irqhandler(bridge, stat);
+
+ /* Mail box irqs */
+ if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
+ TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
+ serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
+
+ /* PCI bus error */
+ if (stat & TSI148_LCSR_INTS_PERRS)
+ serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
+
+ /* VME bus error */
+ if (stat & TSI148_LCSR_INTS_VERRS)
+ serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
+
+ /* IACK irq */
+ if (stat & TSI148_LCSR_INTS_IACKS)
+ serviced |= tsi148_IACK_irqhandler(bridge);
+
+ /* VME bus irqs */
+ if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
+ TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
+ TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
+ TSI148_LCSR_INTS_IRQ1S))
+ serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
+
+ /* Clear serviced interrupts */
+ iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
+
+ return IRQ_HANDLED;
+}
+
+static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
+{
+ int result;
+ unsigned int tmp;
+ struct pci_dev *pdev;
+ struct tsi148_driver *bridge;
+
+ pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
+
+ bridge = tsi148_bridge->driver_priv;
+
+ /* Initialise list for VME bus errors */
+ INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
+
+ mutex_init(&tsi148_bridge->irq_mtx);
+
+ result = request_irq(pdev->irq,
+ tsi148_irqhandler,
+ IRQF_SHARED,
+ driver_name, tsi148_bridge);
+ if (result) {
+ dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
+ "vector %02X\n", pdev->irq);
+ return result;
+ }
+
+ /* Enable and unmask interrupts */
+ tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
+ TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
+ TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
+ TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
+ TSI148_LCSR_INTEO_IACKEO;
+
+ /* This leaves the following interrupts masked.
+ * TSI148_LCSR_INTEO_VIEEO
+ * TSI148_LCSR_INTEO_SYSFLEO
+ * TSI148_LCSR_INTEO_ACFLEO
+ */
+
+ /* Don't enable Location Monitor interrupts here - they will be
+ * enabled when the location monitors are properly configured and
+ * a callback has been attached.
+ * TSI148_LCSR_INTEO_LM0EO
+ * TSI148_LCSR_INTEO_LM1EO
+ * TSI148_LCSR_INTEO_LM2EO
+ * TSI148_LCSR_INTEO_LM3EO
+ */
+
+ /* Don't enable VME interrupts until we add a handler, else the board
+ * will respond to it and we don't want that unless it knows how to
+ * properly deal with it.
+ * TSI148_LCSR_INTEO_IRQ7EO
+ * TSI148_LCSR_INTEO_IRQ6EO
+ * TSI148_LCSR_INTEO_IRQ5EO
+ * TSI148_LCSR_INTEO_IRQ4EO
+ * TSI148_LCSR_INTEO_IRQ3EO
+ * TSI148_LCSR_INTEO_IRQ2EO
+ * TSI148_LCSR_INTEO_IRQ1EO
+ */
+
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
+
+ return 0;
+}
+
+static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
+ struct pci_dev *pdev)
+{
+ struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
+
+ /* Turn off interrupts */
+ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
+ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
+
+ /* Clear all interrupts */
+ iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
+
+ /* Detach interrupt handler */
+ free_irq(pdev->irq, tsi148_bridge);
+}
+
+/*
+ * Check to see if an IACK has been received, return true (1) or false (0).
+ */
+static int tsi148_iack_received(struct tsi148_driver *bridge)
+{
+ u32 tmp;
+
+ tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
+
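+ /* IRQS clears once the generated interrupt has been acknowledged */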
+ if (tmp & TSI148_LCSR_VICR_IRQS)
+ return 0;
+ else
+ return 1;
+}
+
+/*
+ * Configure VME interrupt
+ */
+static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
+ int state, int sync)
+{
+ struct pci_dev *pdev;
+ u32 tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ /* We need to do the ordering differently for enabling and disabling */
+ if (state == 0) {
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
+ tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
+
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
+ tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
+
+ if (sync != 0) {
+ pdev = container_of(tsi148_bridge->parent,
+ struct pci_dev, dev);
+
+ synchronize_irq(pdev->irq);
+ }
+ } else {
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
+ tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
+
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
+ tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
+ }
+}
+
+/*
+ * Generate a VME bus interrupt at the requested level & vector. Wait for
+ * interrupt to be acked.
+ */
+static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
+ int statid)
+{
+ u32 tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ mutex_lock(&bridge->vme_int);
+
+ /* Read VICR register */
+ tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
+
+ /* Set Status/ID */
+ tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
+ (statid & TSI148_LCSR_VICR_STID_M);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
+
+ /* Assert VMEbus IRQ */
+ tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
+
+ /* XXX Consider implementing a timeout? */
+ wait_event_interruptible(bridge->iack_queue,
+ tsi148_iack_received(bridge));
+
+ mutex_unlock(&bridge->vme_int);
+
+ return 0;
+}
+
+/*
+ * Find the first error in this address range
+ */
+static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
+ u32 aspace, unsigned long long address, size_t count)
+{
+ struct list_head *err_pos;
+ struct vme_bus_error *vme_err, *valid = NULL;
+ unsigned long long bound;
+
+ bound = address + count;
+
+ /*
+ * XXX We are currently not looking at the address space when parsing
+ * for errors. This is because parsing the Address Modifier Codes
+ * is going to be quite resource intensive to do properly. We
+ * should be OK just looking at the addresses and this is certainly
+ * much better than what we had before.
+ */
+ err_pos = NULL;
+ /* Iterate through errors */
+ list_for_each(err_pos, &tsi148_bridge->vme_errors) {
+ vme_err = list_entry(err_pos, struct vme_bus_error, list);
+ if ((vme_err->address >= address) &&
+ (vme_err->address < bound)) {
+
+ valid = vme_err;
+ break;
+ }
+ }
+
+ return valid;
+}
+
+/*
+ * Clear errors in the provided address range.
+ */
+static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
+ u32 aspace, unsigned long long address, size_t count)
+{
+ struct list_head *err_pos, *temp;
+ struct vme_bus_error *vme_err;
+ unsigned long long bound;
+
+ bound = address + count;
+
+ /*
+ * XXX We are currently not looking at the address space when parsing
+ * for errors. This is because parsing the Address Modifier Codes
+ * is going to be quite resource intensive to do properly. We
+ * should be OK just looking at the addresses and this is certainly
+ * much better than what we had before.
+ */
+ err_pos = NULL;
+ /* Iterate through errors */
+ list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
+ vme_err = list_entry(err_pos, struct vme_bus_error, list);
+
+ if ((vme_err->address >= address) &&
+ (vme_err->address < bound)) {
+
+ list_del(err_pos);
+ kfree(vme_err);
+ }
+ }
+}
+
+/*
+ * Initialize a slave window with the requested attributes.
+ */
+static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
+ unsigned long long vme_base, unsigned long long size,
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
+{
+ unsigned int i, addr = 0, granularity = 0;
+ unsigned int temp_ctl = 0;
+ unsigned int vme_base_low, vme_base_high;
+ unsigned int vme_bound_low, vme_bound_high;
+ unsigned int pci_offset_low, pci_offset_high;
+ unsigned long long vme_bound, pci_offset;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ tsi148_bridge = image->parent;
+ bridge = tsi148_bridge->driver_priv;
+
+ i = image->number;
+
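+ /* granularity: minimum window size and required alignment per address space */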
+ switch (aspace) {
+ case VME_A16:
+ granularity = 0x10;
+ addr |= TSI148_LCSR_ITAT_AS_A16;
+ break;
+ case VME_A24:
+ granularity = 0x1000;
+ addr |= TSI148_LCSR_ITAT_AS_A24;
+ break;
+ case VME_A32:
+ granularity = 0x10000;
+ addr |= TSI148_LCSR_ITAT_AS_A32;
+ break;
+ case VME_A64:
+ granularity = 0x10000;
+ addr |= TSI148_LCSR_ITAT_AS_A64;
+ break;
+ case VME_CRCSR:
+ case VME_USER1:
+ case VME_USER2:
+ case VME_USER3:
+ case VME_USER4:
+ default:
+ dev_err(tsi148_bridge->parent, "Invalid address space\n");
+ return -EINVAL;
+ break;
+ }
+
+ /* Convert 64-bit variables to 2x 32-bit variables */
+ reg_split(vme_base, &vme_base_high, &vme_base_low);
+
+ /*
+ * Bound address is a valid address for the window, adjust
+ * accordingly
+ */
+ vme_bound = vme_base + size - granularity;
+ reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
+ pci_offset = (unsigned long long)pci_base - vme_base;
+ reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
+
+ if (vme_base_low & (granularity - 1)) {
+ dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
+ return -EINVAL;
+ }
+ if (vme_bound_low & (granularity - 1)) {
+ dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
+ return -EINVAL;
+ }
+ if (pci_offset_low & (granularity - 1)) {
+ dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
+ "alignment\n");
+ return -EINVAL;
+ }
+
+ /* Disable while we are mucking around */
+ temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITAT);
+ temp_ctl &= ~TSI148_LCSR_ITAT_EN;
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITAT);
+
+ /* Setup mapping */
+ iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITSAU);
+ iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITSAL);
+ iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITEAU);
+ iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITEAL);
+ iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITOFU);
+ iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITOFL);
+
+ /* Setup 2eSST speeds */
+ temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
+ switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
+ case VME_2eSST160:
+ temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
+ break;
+ case VME_2eSST267:
+ temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
+ break;
+ case VME_2eSST320:
+ temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
+ break;
+ }
+
+ /* Setup cycle types */
+ temp_ctl &= ~(0x1F << 7);
+ if (cycle & VME_BLT)
+ temp_ctl |= TSI148_LCSR_ITAT_BLT;
+ if (cycle & VME_MBLT)
+ temp_ctl |= TSI148_LCSR_ITAT_MBLT;
+ if (cycle & VME_2eVME)
+ temp_ctl |= TSI148_LCSR_ITAT_2eVME;
+ if (cycle & VME_2eSST)
+ temp_ctl |= TSI148_LCSR_ITAT_2eSST;
+ if (cycle & VME_2eSSTB)
+ temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
+
+ /* Setup address space */
+ temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
+ temp_ctl |= addr;
+
+ temp_ctl &= ~0xF;
+ if (cycle & VME_SUPER)
+ temp_ctl |= TSI148_LCSR_ITAT_SUPR;
+ if (cycle & VME_USER)
+ temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
+ if (cycle & VME_PROG)
+ temp_ctl |= TSI148_LCSR_ITAT_PGM;
+ if (cycle & VME_DATA)
+ temp_ctl |= TSI148_LCSR_ITAT_DATA;
+
+ /* Write ctl reg without enable */
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITAT);
+
+ if (enabled)
+ temp_ctl |= TSI148_LCSR_ITAT_EN;
+
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITAT);
+
+ return 0;
+}
+
+/*
+ * Get slave window configuration.
+ */
+static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size,
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
+{
+ unsigned int i, granularity = 0, ctl = 0;
+ unsigned int vme_base_low, vme_base_high;
+ unsigned int vme_bound_low, vme_bound_high;
+ unsigned int pci_offset_low, pci_offset_high;
+ unsigned long long vme_bound, pci_offset;
+ struct tsi148_driver *bridge;
+
+ bridge = image->parent->driver_priv;
+
+ i = image->number;
+
+ /* Read registers */
+ ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITAT);
+
+ vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITSAU);
+ vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITSAL);
+ vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITEAU);
+ vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITEAL);
+ pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITOFU);
+ pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITOFL);
+
+ /* Combine 2x 32-bit variables into 64-bit variables */
+ reg_join(vme_base_high, vme_base_low, vme_base);
+ reg_join(vme_bound_high, vme_bound_low, &vme_bound);
+ reg_join(pci_offset_high, pci_offset_low, &pci_offset);
+
+ *pci_base = (dma_addr_t)*vme_base + pci_offset;
+
+ *enabled = 0;
+ *aspace = 0;
+ *cycle = 0;
+
+ if (ctl & TSI148_LCSR_ITAT_EN)
+ *enabled = 1;
+
+ if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
+ granularity = 0x10;
+ *aspace |= VME_A16;
+ }
+ if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
+ granularity = 0x1000;
+ *aspace |= VME_A24;
+ }
+ if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
+ granularity = 0x10000;
+ *aspace |= VME_A32;
+ }
+ if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
+ granularity = 0x10000;
+ *aspace |= VME_A64;
+ }
+
+ /* Need granularity before we set the size */
+ *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
+
+ if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
+ *cycle |= VME_2eSST160;
+ if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
+ *cycle |= VME_2eSST267;
+ if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
+ *cycle |= VME_2eSST320;
+
+ if (ctl & TSI148_LCSR_ITAT_BLT)
+ *cycle |= VME_BLT;
+ if (ctl & TSI148_LCSR_ITAT_MBLT)
+ *cycle |= VME_MBLT;
+ if (ctl & TSI148_LCSR_ITAT_2eVME)
+ *cycle |= VME_2eVME;
+ if (ctl & TSI148_LCSR_ITAT_2eSST)
+ *cycle |= VME_2eSST;
+ if (ctl & TSI148_LCSR_ITAT_2eSSTB)
+ *cycle |= VME_2eSSTB;
+
+ if (ctl & TSI148_LCSR_ITAT_SUPR)
+ *cycle |= VME_SUPER;
+ if (ctl & TSI148_LCSR_ITAT_NPRIV)
+ *cycle |= VME_USER;
+ if (ctl & TSI148_LCSR_ITAT_PGM)
+ *cycle |= VME_PROG;
+ if (ctl & TSI148_LCSR_ITAT_DATA)
+ *cycle |= VME_DATA;
+
+ return 0;
+}
+
+/*
+ * Allocate and map PCI Resource
+ */
+static int tsi148_alloc_resource(struct vme_master_resource *image,
+ unsigned long long size)
+{
+ unsigned long long existing_size;
+ int retval = 0;
+ struct pci_dev *pdev;
+ struct vme_bridge *tsi148_bridge;
+
+ tsi148_bridge = image->parent;
+
+ pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
+
+ existing_size = (unsigned long long)(image->bus_resource.end -
+ image->bus_resource.start);
+
+ /* If the existing size is OK, return */
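+ /* (resource end addresses are inclusive, hence the size - 1 comparison) */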
+ if ((size != 0) && (existing_size == (size - 1)))
+ return 0;
+
+ if (existing_size != 0) {
+ iounmap(image->kern_base);
+ image->kern_base = NULL;
+ kfree(image->bus_resource.name);
+ release_resource(&image->bus_resource);
+ memset(&image->bus_resource, 0, sizeof(struct resource));
+ }
+
+ /* Exit here if size is zero */
+ if (size == 0)
+ return 0;
+
+ if (image->bus_resource.name == NULL) {
+ image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
+ if (image->bus_resource.name == NULL) {
+ dev_err(tsi148_bridge->parent, "Unable to allocate "
+ "memory for resource name\n");
+ retval = -ENOMEM;
+ goto err_name;
+ }
+ }
+
+ sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
+ image->number);
+
+ image->bus_resource.start = 0;
+ image->bus_resource.end = (unsigned long)size;
+ image->bus_resource.flags = IORESOURCE_MEM;
+
+ retval = pci_bus_alloc_resource(pdev->bus,
+ &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
+ 0, NULL, NULL);
+ if (retval) {
+ dev_err(tsi148_bridge->parent, "Failed to allocate mem "
+ "resource for window %d size 0x%lx start 0x%lx\n",
+ image->number, (unsigned long)size,
+ (unsigned long)image->bus_resource.start);
+ goto err_resource;
+ }
+
+ image->kern_base = ioremap_nocache(
+ image->bus_resource.start, size);
+ if (image->kern_base == NULL) {
+ dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
+ retval = -ENOMEM;
+ goto err_remap;
+ }
+
+ return 0;
+
+err_remap:
+ release_resource(&image->bus_resource);
+err_resource:
+ kfree(image->bus_resource.name);
+ memset(&image->bus_resource, 0, sizeof(struct resource));
+err_name:
+ return retval;
+}
+
+/*
+ * Free and unmap PCI Resource
+ */
+static void tsi148_free_resource(struct vme_master_resource *image)
+{
+ iounmap(image->kern_base);
+ image->kern_base = NULL;
+ release_resource(&image->bus_resource);
+ kfree(image->bus_resource.name);
+ memset(&image->bus_resource, 0, sizeof(struct resource));
+}
+
+/*
+ * Set the attributes of an outbound window.
+ */
+static int tsi148_master_set(struct vme_master_resource *image, int enabled,
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
+{
+ int retval = 0;
+ unsigned int i;
+ unsigned int temp_ctl = 0;
+ unsigned int pci_base_low, pci_base_high;
+ unsigned int pci_bound_low, pci_bound_high;
+ unsigned int vme_offset_low, vme_offset_high;
+ unsigned long long pci_bound, vme_offset, pci_base;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ tsi148_bridge = image->parent;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ /* Verify input data */
+ if (vme_base & 0xFFFF) {
+ dev_err(tsi148_bridge->parent, "Invalid VME Window "
+ "alignment\n");
+ retval = -EINVAL;
+ goto err_window;
+ }
+
+ if ((size == 0) && (enabled != 0)) {
+ dev_err(tsi148_bridge->parent, "Size must be non-zero for "
+ "enabled windows\n");
+ retval = -EINVAL;
+ goto err_window;
+ }
+
+ spin_lock(&image->lock);
+
+ /* Let's allocate the resource here rather than further up the stack as
+ * it avoids pushing loads of bus dependent stuff up the stack. If size
+ * is zero, any existing resource will be freed.
+ */
+ retval = tsi148_alloc_resource(image, size);
+ if (retval) {
+ spin_unlock(&image->lock);
+ dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
+ "resource\n");
+ goto err_res;
+ }
+
+ if (size == 0) {
+ pci_base = 0;
+ pci_bound = 0;
+ vme_offset = 0;
+ } else {
+ pci_base = (unsigned long long)image->bus_resource.start;
+
+ /*
+ * Bound address is a valid address for the window, adjust
+ * according to window granularity.
+ */
+ pci_bound = pci_base + (size - 0x10000);
+ vme_offset = vme_base - pci_base;
+ }
+
+ /* Convert 64-bit variables to 2x 32-bit variables */
+ reg_split(pci_base, &pci_base_high, &pci_base_low);
+ reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
+ reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
+
+ if (pci_base_low & 0xFFFF) {
+ spin_unlock(&image->lock);
+ dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
+ retval = -EINVAL;
+ goto err_gran;
+ }
+ if (pci_bound_low & 0xFFFF) {
+ spin_unlock(&image->lock);
+ dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
+ retval = -EINVAL;
+ goto err_gran;
+ }
+ if (vme_offset_low & 0xFFFF) {
+ spin_unlock(&image->lock);
+ dev_err(tsi148_bridge->parent, "Invalid VME Offset "
+ "alignment\n");
+ retval = -EINVAL;
+ goto err_gran;
+ }
+
+ i = image->number;
+
+ /* Disable while we are mucking around */
+ temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTAT);
+ temp_ctl &= ~TSI148_LCSR_OTAT_EN;
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTAT);
+
+ /* Setup 2eSST speeds */
+ temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
+ switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
+ case VME_2eSST160:
+ temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
+ break;
+ case VME_2eSST267:
+ temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
+ break;
+ case VME_2eSST320:
+ temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
+ break;
+ }
+
+ /* Setup cycle types */
+ if (cycle & VME_BLT) {
+ temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
+ temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
+ }
+ if (cycle & VME_MBLT) {
+ temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
+ temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
+ }
+ if (cycle & VME_2eVME) {
+ temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
+ temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
+ }
+ if (cycle & VME_2eSST) {
+ temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
+ temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
+ }
+ if (cycle & VME_2eSSTB) {
+ dev_warn(tsi148_bridge->parent, "Currently not setting "
+ "Broadcast Select Registers\n");
+ temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
+ temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
+ }
+
+ /* Setup data width */
+ temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
+ switch (dwidth) {
+ case VME_D16:
+ temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
+ break;
+ case VME_D32:
+ temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
+ break;
+ default:
+ spin_unlock(&image->lock);
+ dev_err(tsi148_bridge->parent, "Invalid data width\n");
+ retval = -EINVAL;
+ goto err_dwidth;
+ }
+
+ /* Setup address space */
+ temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
+ switch (aspace) {
+ case VME_A16:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
+ break;
+ case VME_A24:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
+ break;
+ case VME_A32:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
+ break;
+ case VME_A64:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
+ break;
+ case VME_CRCSR:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
+ break;
+ case VME_USER1:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
+ break;
+ case VME_USER2:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
+ break;
+ case VME_USER3:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
+ break;
+ case VME_USER4:
+ temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
+ break;
+ default:
+ spin_unlock(&image->lock);
+ dev_err(tsi148_bridge->parent, "Invalid address space\n");
+ retval = -EINVAL;
+ goto err_aspace;
+ break;
+ }
+
+ temp_ctl &= ~(3<<4);
+ if (cycle & VME_SUPER)
+ temp_ctl |= TSI148_LCSR_OTAT_SUP;
+ if (cycle & VME_PROG)
+ temp_ctl |= TSI148_LCSR_OTAT_PGM;
+
+ /* Setup mapping */
+ iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTSAU);
+ iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTSAL);
+ iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTEAU);
+ iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTEAL);
+ iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTOFU);
+ iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTOFL);
+
+ /* Write ctl reg without enable */
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTAT);
+
+ if (enabled)
+ temp_ctl |= TSI148_LCSR_OTAT_EN;
+
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTAT);
+
+ spin_unlock(&image->lock);
+ return 0;
+
+err_aspace:
+err_dwidth:
+err_gran:
+ tsi148_free_resource(image);
+err_res:
+err_window:
+ return retval;
+}
+
+/*
+ * Get the attributes of an outbound window.
+ *
+ * XXX Not parsing prefetch information.
+ */
+static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
+{
+ unsigned int i, ctl;
+ unsigned int pci_base_low, pci_base_high;
+ unsigned int pci_bound_low, pci_bound_high;
+ unsigned int vme_offset_low, vme_offset_high;
+
+ unsigned long long pci_base, pci_bound, vme_offset;
+ struct tsi148_driver *bridge;
+
+ bridge = image->parent->driver_priv;
+
+ i = image->number;
+
+ ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTAT);
+
+ pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTSAU);
+ pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTSAL);
+ pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTEAU);
+ pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTEAL);
+ vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTOFU);
+ vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTOFL);
+
+ /* Combine 2x 32-bit variables into 64-bit variables */
+ reg_join(pci_base_high, pci_base_low, &pci_base);
+ reg_join(pci_bound_high, pci_bound_low, &pci_bound);
+ reg_join(vme_offset_high, vme_offset_low, &vme_offset);
+
+ *vme_base = pci_base + vme_offset;
+ *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
+
+ *enabled = 0;
+ *aspace = 0;
+ *cycle = 0;
+ *dwidth = 0;
+
+ if (ctl & TSI148_LCSR_OTAT_EN)
+ *enabled = 1;
+
+ /* Setup address space */
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
+ *aspace |= VME_A16;
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
+ *aspace |= VME_A24;
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
+ *aspace |= VME_A32;
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
+ *aspace |= VME_A64;
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
+ *aspace |= VME_CRCSR;
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
+ *aspace |= VME_USER1;
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
+ *aspace |= VME_USER2;
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
+ *aspace |= VME_USER3;
+ if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
+ *aspace |= VME_USER4;
+
+ /* Setup 2eSST speeds */
+ if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
+ *cycle |= VME_2eSST160;
+ if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
+ *cycle |= VME_2eSST267;
+ if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
+ *cycle |= VME_2eSST320;
+
+ /* Setup cycle types */
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
+ *cycle |= VME_SCT;
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
+ *cycle |= VME_BLT;
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
+ *cycle |= VME_MBLT;
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
+ *cycle |= VME_2eVME;
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
+ *cycle |= VME_2eSST;
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
+ *cycle |= VME_2eSSTB;
+
+ if (ctl & TSI148_LCSR_OTAT_SUP)
+ *cycle |= VME_SUPER;
+ else
+ *cycle |= VME_USER;
+
+ if (ctl & TSI148_LCSR_OTAT_PGM)
+ *cycle |= VME_PROG;
+ else
+ *cycle |= VME_DATA;
+
+ /* Setup data width */
+ if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
+ *dwidth = VME_D16;
+ if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
+ *dwidth = VME_D32;
+
+ return 0;
+}
+
+static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
+{
+ int retval;
+
+ spin_lock(&image->lock);
+
+ retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
+ cycle, dwidth);
+
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
+static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
+ size_t count, loff_t offset)
+{
+ int retval, enabled;
+ unsigned long long vme_base, size;
+ u32 aspace, cycle, dwidth;
+ struct vme_bus_error *vme_err = NULL;
+ struct vme_bridge *tsi148_bridge;
+
+ tsi148_bridge = image->parent;
+
+ spin_lock(&image->lock);
+
+ memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
+ retval = count;
+
+ if (!err_chk)
+ goto skip_chk;
+
+ __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
+ &dwidth);
+
+ vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
+ count);
+ if (vme_err != NULL) {
+ dev_err(image->parent->parent, "First VME read error detected "
+ "an at address 0x%llx\n", vme_err->address);
+ retval = vme_err->address - (vme_base + offset);
+ /* Clear down saved errors in this address range */
+ tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
+ count);
+ }
+
+skip_chk:
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
+static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
+ size_t count, loff_t offset)
+{
+ int retval = 0, enabled;
+ unsigned long long vme_base, size;
+ u32 aspace, cycle, dwidth;
+
+ struct vme_bus_error *vme_err = NULL;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ tsi148_bridge = image->parent;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ spin_lock(&image->lock);
+
+ memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
+ retval = count;
+
+ /*
+ * Writes are posted. We need to do a read on the VME bus to flush out
+ * all of the writes before we check for errors. We can't guarantee
+ * that reading the data we have just written is safe. It is believed
+ * that there isn't any read/write re-ordering, so we can read any
+ * location in VME space, so let's read the Device ID from the tsi148's
+ * own registers as mapped into CR/CSR space.
+ *
+ * We check for saved errors in the written address range/space.
+ */
+
+ if (!err_chk)
+ goto skip_chk;
+
+ /*
+ * Get window info first, to maximise the time that the buffers may
+ * flush on their own
+ */
+ __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
+ &dwidth);
+
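+ /* Dummy read of our own CR/CSR space (flush image) to flush posted writes */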
+ ioread16(bridge->flush_image->kern_base + 0x7F000);
+
+ vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
+ count);
+ if (vme_err != NULL) {
+ dev_warn(tsi148_bridge->parent, "First VME write error detected"
+ " an at address 0x%llx\n", vme_err->address);
+ retval = vme_err->address - (vme_base + offset);
+ /* Clear down saved errors in this address range */
+ tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
+ count);
+ }
+
+skip_chk:
+ spin_unlock(&image->lock);
+
+ return retval;
+}
+
+/*
+ * Perform an RMW cycle on the VME bus.
+ *
+ * Requires a previously configured master window, returns final value.
+ */
+static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
+ unsigned int mask, unsigned int compare, unsigned int swap,
+ loff_t offset)
+{
+ unsigned long long pci_addr;
+ unsigned int pci_addr_high, pci_addr_low;
+ u32 tmp, result;
+ int i;
+ struct tsi148_driver *bridge;
+
+ bridge = image->parent->driver_priv;
+
+ /* Find the PCI address that maps to the desired VME address */
+ i = image->number;
+
+ /* Locking as we can only do one of these at a time */
+ mutex_lock(&bridge->vme_rmw);
+
+ /* Lock image */
+ spin_lock(&image->lock);
+
+ pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTSAU);
+ pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTSAL);
+
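+ /* Calculate the absolute PCI address of the RMW target in this window */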
+ reg_join(pci_addr_high, pci_addr_low, &pci_addr);
+ reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
+
+ /* Configure registers */
+ iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
+ iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
+ iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
+ iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
+ iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
+
+ /* Enable RMW */
+ tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
+ tmp |= TSI148_LCSR_VMCTRL_RMWEN;
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
+
+ /* Kick process off with a read to the required address. */
+ result = ioread32be(image->kern_base + offset);
+
+ /* Disable RMW */
+ tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
+ tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
+
+ spin_unlock(&image->lock);
+
+ mutex_unlock(&bridge->vme_rmw);
+
+ return result;
+}
+
+static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
+ u32 aspace, u32 cycle, u32 dwidth)
+{
+ u32 val;
+
+ val = be32_to_cpu(*attr);
+
+ /* Setup 2eSST speeds */
+ switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
+ case VME_2eSST160:
+ val |= TSI148_LCSR_DSAT_2eSSTM_160;
+ break;
+ case VME_2eSST267:
+ val |= TSI148_LCSR_DSAT_2eSSTM_267;
+ break;
+ case VME_2eSST320:
+ val |= TSI148_LCSR_DSAT_2eSSTM_320;
+ break;
+ }
+
+ /* Setup cycle types */
+ if (cycle & VME_SCT)
+ val |= TSI148_LCSR_DSAT_TM_SCT;
+
+ if (cycle & VME_BLT)
+ val |= TSI148_LCSR_DSAT_TM_BLT;
+
+ if (cycle & VME_MBLT)
+ val |= TSI148_LCSR_DSAT_TM_MBLT;
+
+ if (cycle & VME_2eVME)
+ val |= TSI148_LCSR_DSAT_TM_2eVME;
+
+ if (cycle & VME_2eSST)
+ val |= TSI148_LCSR_DSAT_TM_2eSST;
+
+ if (cycle & VME_2eSSTB) {
+ dev_err(dev, "Currently not setting Broadcast Select "
+ "Registers\n");
+ val |= TSI148_LCSR_DSAT_TM_2eSSTB;
+ }
+
+ /* Setup data width */
+ switch (dwidth) {
+ case VME_D16:
+ val |= TSI148_LCSR_DSAT_DBW_16;
+ break;
+ case VME_D32:
+ val |= TSI148_LCSR_DSAT_DBW_32;
+ break;
+ default:
+ dev_err(dev, "Invalid data width\n");
+ return -EINVAL;
+ }
+
+ /* Setup address space */
+ switch (aspace) {
+ case VME_A16:
+ val |= TSI148_LCSR_DSAT_AMODE_A16;
+ break;
+ case VME_A24:
+ val |= TSI148_LCSR_DSAT_AMODE_A24;
+ break;
+ case VME_A32:
+ val |= TSI148_LCSR_DSAT_AMODE_A32;
+ break;
+ case VME_A64:
+ val |= TSI148_LCSR_DSAT_AMODE_A64;
+ break;
+ case VME_CRCSR:
+ val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
+ break;
+ case VME_USER1:
+ val |= TSI148_LCSR_DSAT_AMODE_USER1;
+ break;
+ case VME_USER2:
+ val |= TSI148_LCSR_DSAT_AMODE_USER2;
+ break;
+ case VME_USER3:
+ val |= TSI148_LCSR_DSAT_AMODE_USER3;
+ break;
+ case VME_USER4:
+ val |= TSI148_LCSR_DSAT_AMODE_USER4;
+ break;
+ default:
+ dev_err(dev, "Invalid address space\n");
+ return -EINVAL;
+ break;
+ }
+
+ if (cycle & VME_SUPER)
+ val |= TSI148_LCSR_DSAT_SUP;
+ if (cycle & VME_PROG)
+ val |= TSI148_LCSR_DSAT_PGM;
+
+ *attr = cpu_to_be32(val);
+
+ return 0;
+}
+
+static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
+ u32 aspace, u32 cycle, u32 dwidth)
+{
+ u32 val;
+
+ val = be32_to_cpu(*attr);
+
+ /* Setup 2eSST speeds */
+ switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
+ case VME_2eSST160:
+ val |= TSI148_LCSR_DDAT_2eSSTM_160;
+ break;
+ case VME_2eSST267:
+ val |= TSI148_LCSR_DDAT_2eSSTM_267;
+ break;
+ case VME_2eSST320:
+ val |= TSI148_LCSR_DDAT_2eSSTM_320;
+ break;
+ }
+
+ /* Setup cycle types */
+ if (cycle & VME_SCT)
+ val |= TSI148_LCSR_DDAT_TM_SCT;
+
+ if (cycle & VME_BLT)
+ val |= TSI148_LCSR_DDAT_TM_BLT;
+
+ if (cycle & VME_MBLT)
+ val |= TSI148_LCSR_DDAT_TM_MBLT;
+
+ if (cycle & VME_2eVME)
+ val |= TSI148_LCSR_DDAT_TM_2eVME;
+
+ if (cycle & VME_2eSST)
+ val |= TSI148_LCSR_DDAT_TM_2eSST;
+
+ if (cycle & VME_2eSSTB) {
+ dev_err(dev, "Currently not setting Broadcast Select "
+ "Registers\n");
+ val |= TSI148_LCSR_DDAT_TM_2eSSTB;
+ }
+
+ /* Setup data width */
+ switch (dwidth) {
+ case VME_D16:
+ val |= TSI148_LCSR_DDAT_DBW_16;
+ break;
+ case VME_D32:
+ val |= TSI148_LCSR_DDAT_DBW_32;
+ break;
+ default:
+ dev_err(dev, "Invalid data width\n");
+ return -EINVAL;
+ }
+
+ /* Setup address space */
+ switch (aspace) {
+ case VME_A16:
+ val |= TSI148_LCSR_DDAT_AMODE_A16;
+ break;
+ case VME_A24:
+ val |= TSI148_LCSR_DDAT_AMODE_A24;
+ break;
+ case VME_A32:
+ val |= TSI148_LCSR_DDAT_AMODE_A32;
+ break;
+ case VME_A64:
+ val |= TSI148_LCSR_DDAT_AMODE_A64;
+ break;
+ case VME_CRCSR:
+ val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
+ break;
+ case VME_USER1:
+ val |= TSI148_LCSR_DDAT_AMODE_USER1;
+ break;
+ case VME_USER2:
+ val |= TSI148_LCSR_DDAT_AMODE_USER2;
+ break;
+ case VME_USER3:
+ val |= TSI148_LCSR_DDAT_AMODE_USER3;
+ break;
+ case VME_USER4:
+ val |= TSI148_LCSR_DDAT_AMODE_USER4;
+ break;
+ default:
+ dev_err(dev, "Invalid address space\n");
+ return -EINVAL;
+ break;
+ }
+
+ if (cycle & VME_SUPER)
+ val |= TSI148_LCSR_DDAT_SUP;
+ if (cycle & VME_PROG)
+ val |= TSI148_LCSR_DDAT_PGM;
+
+ *attr = cpu_to_be32(val);
+
+ return 0;
+}
+
+/*
+ * Add a link list descriptor to the list
+ *
+ * Note: DMA engine expects the DMA descriptor to be big endian.
+ */
+static int tsi148_dma_list_add(struct vme_dma_list *list,
+ struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
+{
+ struct tsi148_dma_entry *entry, *prev;
+ u32 address_high, address_low, val;
+ struct vme_dma_pattern *pattern_attr;
+ struct vme_dma_pci *pci_attr;
+ struct vme_dma_vme *vme_attr;
+ int retval = 0;
+ struct vme_bridge *tsi148_bridge;
+
+ tsi148_bridge = list->parent->parent;
+
+ /* Descriptor must be aligned on 64-bit boundaries */
+ entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
+ if (entry == NULL) {
+ dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
+ "dma resource structure\n");
+ retval = -ENOMEM;
+ goto err_mem;
+ }
+
+ /* Test descriptor alignment */
+ if ((unsigned long)&entry->descriptor & 0x7) {
+ dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
+ "byte boundary as required: %p\n",
+ &entry->descriptor);
+ retval = -EINVAL;
+ goto err_align;
+ }
+
+ /* Given we are going to fill out the structure, we probably don't
+ * need to zero it, but better safe than sorry for now.
+ */
+ memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
+
+ /* Fill out source part */
+ switch (src->type) {
+ case VME_DMA_PATTERN:
+ pattern_attr = src->private;
+
+ entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
+
+ val = TSI148_LCSR_DSAT_TYP_PAT;
+
+ /* Default behaviour is 32 bit pattern */
+ if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
+ val |= TSI148_LCSR_DSAT_PSZ;
+
+ /* It seems that the default behaviour is to increment */
+ if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
+ val |= TSI148_LCSR_DSAT_NIN;
+ entry->descriptor.dsat = cpu_to_be32(val);
+ break;
+ case VME_DMA_PCI:
+ pci_attr = src->private;
+
+ reg_split((unsigned long long)pci_attr->address, &address_high,
+ &address_low);
+ entry->descriptor.dsau = cpu_to_be32(address_high);
+ entry->descriptor.dsal = cpu_to_be32(address_low);
+ entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
+ break;
+ case VME_DMA_VME:
+ vme_attr = src->private;
+
+ reg_split((unsigned long long)vme_attr->address, &address_high,
+ &address_low);
+ entry->descriptor.dsau = cpu_to_be32(address_high);
+ entry->descriptor.dsal = cpu_to_be32(address_low);
+ entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
+
+ retval = tsi148_dma_set_vme_src_attributes(
+ tsi148_bridge->parent, &entry->descriptor.dsat,
+ vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
+ if (retval < 0)
+ goto err_source;
+ break;
+ default:
+ dev_err(tsi148_bridge->parent, "Invalid source type\n");
+ retval = -EINVAL;
+ goto err_source;
+ break;
+ }
+
+ /* Assume last link - this will be over-written by adding another */
+ entry->descriptor.dnlau = cpu_to_be32(0);
+ entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
+
+ /* Fill out destination part */
+ switch (dest->type) {
+ case VME_DMA_PCI:
+ pci_attr = dest->private;
+
+ reg_split((unsigned long long)pci_attr->address, &address_high,
+ &address_low);
+ entry->descriptor.ddau = cpu_to_be32(address_high);
+ entry->descriptor.ddal = cpu_to_be32(address_low);
+ entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
+ break;
+ case VME_DMA_VME:
+ vme_attr = dest->private;
+
+ reg_split((unsigned long long)vme_attr->address, &address_high,
+ &address_low);
+ entry->descriptor.ddau = cpu_to_be32(address_high);
+ entry->descriptor.ddal = cpu_to_be32(address_low);
+ entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
+
+ retval = tsi148_dma_set_vme_dest_attributes(
+ tsi148_bridge->parent, &entry->descriptor.ddat,
+ vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
+ if (retval < 0)
+ goto err_dest;
+ break;
+ default:
+ dev_err(tsi148_bridge->parent, "Invalid destination type\n");
+ retval = -EINVAL;
+ goto err_dest;
+ break;
+ }
+
+ /* Fill out count */
+ entry->descriptor.dcnt = cpu_to_be32((u32)count);
+
+ /* Add to list */
+ list_add_tail(&entry->list, &list->entries);
+
+ /* Fill out previous descriptors "Next Address" */
+ if (entry->list.prev != &list->entries) {
+ prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
+ list);
+ /* We need the bus address for the pointer */
+ entry->dma_handle = dma_map_single(tsi148_bridge->parent,
+ &entry->descriptor,
+ sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
+
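+ /* Point the previous descriptor's next-link at this entry's descriptor */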
+ reg_split((unsigned long long)entry->dma_handle, &address_high,
+ &address_low);
+ prev->descriptor.dnlau = cpu_to_be32(address_high);
+ prev->descriptor.dnlal = cpu_to_be32(address_low);
+
+ }
+
+ return 0;
+
+err_dest:
+err_source:
+err_align:
+ kfree(entry);
+err_mem:
+ return retval;
+}
+
+/*
+ * Check to see if the provided DMA channel is busy.
+ * Returns 0 while the channel is busy and 1 once the transfer has completed.
+ */
+static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
+{
+ u32 tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
+ TSI148_LCSR_OFFSET_DSTA);
+
+ if (tmp & TSI148_LCSR_DSTA_BSY)
+ return 0;
+ else
+ return 1;
+
+}
+
+/*
+ * Execute a previously generated link list
+ *
+ * XXX Need to provide control register configuration.
+ */
+static int tsi148_dma_list_exec(struct vme_dma_list *list)
+{
+ struct vme_dma_resource *ctrlr;
+ int channel, retval = 0;
+ struct tsi148_dma_entry *entry;
+ u32 bus_addr_high, bus_addr_low;
+ u32 val, dctlreg = 0;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ ctrlr = list->parent;
+
+ tsi148_bridge = ctrlr->parent;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ mutex_lock(&ctrlr->mtx);
+
+ channel = ctrlr->number;
+
+ if (!list_empty(&ctrlr->running)) {
+ /*
+ * XXX We have an active DMA transfer and currently haven't
+ * sorted out the mechanism for "pending" DMA transfers.
+ * Return busy.
+ */
+ /* Need to add to pending here */
+ mutex_unlock(&ctrlr->mtx);
+ return -EBUSY;
+ } else {
+ list_add(&list->list, &ctrlr->running);
+ }
+
+ /* Get first bus address and write into registers */
+ entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
+ list);
+
+ entry->dma_handle = dma_map_single(tsi148_bridge->parent,
+ &entry->descriptor,
+ sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
+
+ mutex_unlock(&ctrlr->mtx);
+
+ reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
+
+ iowrite32be(bus_addr_high, bridge->base +
+ TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
+ iowrite32be(bus_addr_low, bridge->base +
+ TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
+
+ dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
+ TSI148_LCSR_OFFSET_DCTL);
+
+ /* Start the operation */
+ iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
+ TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
+
+ wait_event_interruptible(bridge->dma_queue[channel],
+ tsi148_dma_busy(ctrlr->parent, channel));
+
+ /*
+ * Read status register, this register is valid until we kick off a
+ * new transfer.
+ */
+ val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
+ TSI148_LCSR_OFFSET_DSTA);
+
+ if (val & TSI148_LCSR_DSTA_VBE) {
+ dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
+ retval = -EIO;
+ }
+
+ /* Remove list from running list */
+ mutex_lock(&ctrlr->mtx);
+ list_del(&list->list);
+ mutex_unlock(&ctrlr->mtx);
+
+ return retval;
+}
+
+/*
+ * Clean up a previously generated link list
+ *
+ * Kept as a separate function - don't assume that the chain can't be reused.
+ */
+static int tsi148_dma_list_empty(struct vme_dma_list *list)
+{
+ struct list_head *pos, *temp;
+ struct tsi148_dma_entry *entry;
+
+ struct vme_bridge *tsi148_bridge = list->parent->parent;
+
+ /* detach and free each entry */
+ list_for_each_safe(pos, temp, &list->entries) {
+ list_del(pos);
+ entry = list_entry(pos, struct tsi148_dma_entry, list);
+
+ dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
+ sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
+ kfree(entry);
+ }
+
+ return 0;
+}
+
+/*
+ * All 4 location monitors reside at the same base - this is therefore a
+ * system wide configuration.
+ *
+ * This does not enable the location monitor - that should be done when the first
+ * callback is attached and disabled when the last callback is removed.
+ */
+static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
+ u32 aspace, u32 cycle)
+{
+ u32 lm_base_high, lm_base_low, lm_ctl = 0;
+ int i;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ tsi148_bridge = lm->parent;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ /* If we already have a callback attached, we can't move it! */
+ for (i = 0; i < lm->monitors; i++) {
+ if (bridge->lm_callback[i] != NULL) {
+ mutex_unlock(&lm->mtx);
+ dev_err(tsi148_bridge->parent, "Location monitor "
+ "callback attached, can't reset\n");
+ return -EBUSY;
+ }
+ }
+
+ switch (aspace) {
+ case VME_A16:
+ lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
+ break;
+ case VME_A24:
+ lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
+ break;
+ case VME_A32:
+ lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
+ break;
+ case VME_A64:
+ lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
+ break;
+ default:
+ mutex_unlock(&lm->mtx);
+ dev_err(tsi148_bridge->parent, "Invalid address space\n");
+ return -EINVAL;
+ break;
+ }
+
+ if (cycle & VME_SUPER)
+ lm_ctl |= TSI148_LCSR_LMAT_SUPR;
+ if (cycle & VME_USER)
+ lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
+ if (cycle & VME_PROG)
+ lm_ctl |= TSI148_LCSR_LMAT_PGM;
+ if (cycle & VME_DATA)
+ lm_ctl |= TSI148_LCSR_LMAT_DATA;
+
+ reg_split(lm_base, &lm_base_high, &lm_base_low);
+
+ iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
+ iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
+ iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
+/* Get configuration of the location monitor and return whether it is enabled
+ * or disabled.
+ */
+static int tsi148_lm_get(struct vme_lm_resource *lm,
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
+{
+ u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
+ struct tsi148_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
+ lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
+ lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
+
+ reg_join(lm_base_high, lm_base_low, lm_base);
+
+ if (lm_ctl & TSI148_LCSR_LMAT_EN)
+ enabled = 1;
+
+ if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
+ *aspace |= VME_A16;
+
+ if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
+ *aspace |= VME_A24;
+
+ if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
+ *aspace |= VME_A32;
+
+ if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
+ *aspace |= VME_A64;
+
+ if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
+ *cycle |= VME_SUPER;
+ if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
+ *cycle |= VME_USER;
+ if (lm_ctl & TSI148_LCSR_LMAT_PGM)
+ *cycle |= VME_PROG;
+ if (lm_ctl & TSI148_LCSR_LMAT_DATA)
+ *cycle |= VME_DATA;
+
+ mutex_unlock(&lm->mtx);
+
+ return enabled;
+}
+
+/*
+ * Attach a callback to a specific location monitor.
+ *
+ * The callback will be passed the number of the monitor that triggered.
+ */
+static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
+ void (*callback)(int))
+{
+ u32 lm_ctl, tmp;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ tsi148_bridge = lm->parent;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ /* Ensure that the location monitor is configured - need PGM or DATA */
+ lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
+ if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
+ mutex_unlock(&lm->mtx);
+ dev_err(tsi148_bridge->parent, "Location monitor not properly "
+ "configured\n");
+ return -EINVAL;
+ }
+
+ /* Check that a callback isn't already attached */
+ if (bridge->lm_callback[monitor] != NULL) {
+ mutex_unlock(&lm->mtx);
+ dev_err(tsi148_bridge->parent, "Existing callback attached\n");
+ return -EBUSY;
+ }
+
+ /* Attach callback */
+ bridge->lm_callback[monitor] = callback;
+
+ /* Enable Location Monitor interrupt */
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
+ tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
+
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
+ tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
+
+ /* Ensure that the global Location Monitor Enable is set */
+ if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
+ lm_ctl |= TSI148_LCSR_LMAT_EN;
+ iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
+ }
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
+/*
+ * Detach a callback function from a specific location monitor.
+ */
+static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
+{
+ u32 lm_en, tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
+
+ mutex_lock(&lm->mtx);
+
+ /* Disable Location Monitor and ensure previous interrupts are clear */
+ lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
+ lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
+ iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
+
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
+ tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
+
+ iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
+ bridge->base + TSI148_LCSR_INTC);
+
+ /* Detach callback */
+ bridge->lm_callback[monitor] = NULL;
+
+ /* If all location monitors disabled, disable global Location Monitor */
+ if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
+ TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
+ tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
+ tmp &= ~TSI148_LCSR_LMAT_EN;
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
+ }
+
+ mutex_unlock(&lm->mtx);
+
+ return 0;
+}
+
+/*
+ * Determine Geographical Addressing
+ */
+static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
+{
+ u32 slot = 0;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
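+ /* Use the geoid override if set, otherwise read the geographical address */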
+ if (!geoid) {
+ slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
+ slot = slot & TSI148_LCSR_VSTAT_GA_M;
+ } else
+ slot = geoid;
+
+ return (int)slot;
+}
+
+void *tsi148_alloc_consistent(struct device *parent, size_t size,
+ dma_addr_t *dma)
+{
+ struct pci_dev *pdev;
+
+ /* Find pci_dev container of dev */
+ pdev = container_of(parent, struct pci_dev, dev);
+
+ return pci_alloc_consistent(pdev, size, dma);
+}
+
+void tsi148_free_consistent(struct device *parent, size_t size, void *vaddr,
+ dma_addr_t dma)
+{
+ struct pci_dev *pdev;
+
+ /* Find pci_dev container of dev */
+ pdev = container_of(parent, struct pci_dev, dev);
+
+ pci_free_consistent(pdev, size, vaddr, dma);
+}
+
+static int __init tsi148_init(void)
+{
+ return pci_register_driver(&tsi148_driver);
+}
+
+/*
+ * Configure CR/CSR space
+ *
+ * Access to the CR/CSR can be configured at power-up. The location of the
+ * CR/CSR registers in the CR/CSR address space is determined by the board's
+ * Auto-ID or Geographic address. This function ensures that the window is
+ * enabled at an offset consistent with the board's geographic address.
+ *
+ * Each board has a 512kB window, with the highest 4kB being used for the
+ * board's registers; this means there is a fixed-length 508kB window which must
+ * be mapped onto PCI memory.
+ */
+static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
+ struct pci_dev *pdev)
+{
+ u32 cbar, crat, vstat;
+ u32 crcsr_bus_high, crcsr_bus_low;
+ int retval;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ /* Allocate mem for CR/CSR image */
+ bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
+ &bridge->crcsr_bus);
+ if (bridge->crcsr_kernel == NULL) {
+ dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
+ "CR/CSR image\n");
+ return -ENOMEM;
+ }
+
+ memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
+
+ reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
+
+ iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
+ iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
+
+ /* Ensure that the CR/CSR is configured at the correct offset */
+ cbar = ioread32be(bridge->base + TSI148_CBAR);
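+ /* The CR/CSR base (slot) sits in the upper bits of CBAR, hence the shift */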
+ cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
+
+ vstat = tsi148_slot_get(tsi148_bridge);
+
+ if (cbar != vstat) {
+ cbar = vstat;
+ dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
+ iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
+ }
+ dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
+
+ crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
+ if (crat & TSI148_LCSR_CRAT_EN) {
+ dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
+ } else {
+ dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
+ iowrite32be(crat | TSI148_LCSR_CRAT_EN,
+ bridge->base + TSI148_LCSR_CRAT);
+ }
+
+ /* If we want flushed, error-checked writes, set up a window
+ * over the CR/CSR registers. We read from here to safely flush
+ * through VME writes.
+ */
+ if (err_chk) {
+ retval = tsi148_master_set(bridge->flush_image, 1,
+ (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
+ VME_D16);
+ if (retval)
+ dev_err(tsi148_bridge->parent, "Configuring flush image"
+ " failed\n");
+ }
+
+ return 0;
+
+}
+
+static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
+ struct pci_dev *pdev)
+{
+ u32 crat;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ /* Turn off CR/CSR space */
+ crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
+ iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
+ bridge->base + TSI148_LCSR_CRAT);
+
+ /* Free image */
+ iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
+ iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
+
+ pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
+ bridge->crcsr_bus);
+}
+
+static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int retval, i, master_num;
+ u32 data;
+ struct list_head *pos = NULL;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *tsi148_device;
+ struct vme_master_resource *master_image;
+ struct vme_slave_resource *slave_image;
+ struct vme_dma_resource *dma_ctrlr;
+ struct vme_lm_resource *lm;
+
+ /* If we want to support more than one of each bridge, we need to
+ * dynamically generate this so we get one per device
+ */
+ tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
+ if (tsi148_bridge == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for device "
+ "structure\n");
+ retval = -ENOMEM;
+ goto err_struct;
+ }
+
+ tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
+ if (tsi148_device == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for device "
+ "structure\n");
+ retval = -ENOMEM;
+ goto err_driver;
+ }
+
+ tsi148_bridge->driver_priv = tsi148_device;
+
+ /* Enable the device */
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "Unable to enable device\n");
+ goto err_enable;
+ }
+
+ /* Map Registers */
+ retval = pci_request_regions(pdev, driver_name);
+ if (retval) {
+ dev_err(&pdev->dev, "Unable to reserve resources\n");
+ goto err_resource;
+ }
+
+ /* map registers in BAR 0 */
+ tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ 4096);
+ if (!tsi148_device->base) {
+ dev_err(&pdev->dev, "Unable to remap CRG region\n");
+ retval = -EIO;
+ goto err_remap;
+ }
+
+ /* Check to see if the mapping worked out */
+ data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
+ if (data != PCI_VENDOR_ID_TUNDRA) {
+ dev_err(&pdev->dev, "CRG region check failed\n");
+ retval = -EIO;
+ goto err_test;
+ }
+
+ /* Initialize wait queues & mutual exclusion flags */
+ init_waitqueue_head(&tsi148_device->dma_queue[0]);
+ init_waitqueue_head(&tsi148_device->dma_queue[1]);
+ init_waitqueue_head(&tsi148_device->iack_queue);
+ mutex_init(&tsi148_device->vme_int);
+ mutex_init(&tsi148_device->vme_rmw);
+
+ tsi148_bridge->parent = &pdev->dev;
+ strcpy(tsi148_bridge->name, driver_name);
+
+ /* Setup IRQ */
+ retval = tsi148_irq_init(tsi148_bridge);
+ if (retval != 0) {
+ dev_err(&pdev->dev, "Chip Initialization failed.\n");
+ goto err_irq;
+ }
+
+ /* If we are going to flush writes, we need to read from the VME bus.
+ * We need to do this safely, thus we read the devices own CR/CSR
+ * register. To do this we must set up a window in CR/CSR space and
+ * hence have one less master window resource available.
+ */
+ master_num = TSI148_MAX_MASTER;
+ if (err_chk) {
+ master_num--;
+
+ tsi148_device->flush_image =
+ kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
+ if (tsi148_device->flush_image == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "flush resource structure\n");
+ retval = -ENOMEM;
+ goto err_master;
+ }
+ tsi148_device->flush_image->parent = tsi148_bridge;
+ spin_lock_init(&tsi148_device->flush_image->lock);
+ tsi148_device->flush_image->locked = 1;
+ tsi148_device->flush_image->number = master_num;
+ tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
+ VME_A32 | VME_A64;
+ tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
+ VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
+ VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
+ VME_USER | VME_PROG | VME_DATA;
+ tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
+ memset(&tsi148_device->flush_image->bus_resource, 0,
+ sizeof(struct resource));
+ tsi148_device->flush_image->kern_base = NULL;
+ }
+
+ /* Add master windows to list */
+ INIT_LIST_HEAD(&tsi148_bridge->master_resources);
+ for (i = 0; i < master_num; i++) {
+ master_image = kmalloc(sizeof(struct vme_master_resource),
+ GFP_KERNEL);
+ if (master_image == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "master resource structure\n");
+ retval = -ENOMEM;
+ goto err_master;
+ }
+ master_image->parent = tsi148_bridge;
+ spin_lock_init(&master_image->lock);
+ master_image->locked = 0;
+ master_image->number = i;
+ master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
+ VME_A64;
+ master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
+ VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
+ VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
+ VME_PROG | VME_DATA;
+ master_image->width_attr = VME_D16 | VME_D32;
+ memset(&master_image->bus_resource, 0,
+ sizeof(struct resource));
+ master_image->kern_base = NULL;
+ list_add_tail(&master_image->list,
+ &tsi148_bridge->master_resources);
+ }
+
+ /* Add slave windows to list */
+ INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
+ for (i = 0; i < TSI148_MAX_SLAVE; i++) {
+ slave_image = kmalloc(sizeof(struct vme_slave_resource),
+ GFP_KERNEL);
+ if (slave_image == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "slave resource structure\n");
+ retval = -ENOMEM;
+ goto err_slave;
+ }
+ slave_image->parent = tsi148_bridge;
+ mutex_init(&slave_image->mtx);
+ slave_image->locked = 0;
+ slave_image->number = i;
+ slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
+ VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
+ VME_USER3 | VME_USER4;
+ slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
+ VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
+ VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
+ VME_PROG | VME_DATA;
+ list_add_tail(&slave_image->list,
+ &tsi148_bridge->slave_resources);
+ }
+
+ /* Add dma engines to list */
+ INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
+ for (i = 0; i < TSI148_MAX_DMA; i++) {
+ dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
+ GFP_KERNEL);
+ if (dma_ctrlr == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "dma resource structure\n");
+ retval = -ENOMEM;
+ goto err_dma;
+ }
+ dma_ctrlr->parent = tsi148_bridge;
+ mutex_init(&dma_ctrlr->mtx);
+ dma_ctrlr->locked = 0;
+ dma_ctrlr->number = i;
+ dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
+ VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
+ VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
+ VME_DMA_PATTERN_TO_MEM;
+ INIT_LIST_HEAD(&dma_ctrlr->pending);
+ INIT_LIST_HEAD(&dma_ctrlr->running);
+ list_add_tail(&dma_ctrlr->list,
+ &tsi148_bridge->dma_resources);
+ }
+
+ /* Add location monitor to list */
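+ /*
+ * The Tsi148 has a single location monitor block (LMBAU/LMBAL/LMAT) that
+ * signals hits on four addresses (the LM0-LM3 interrupts), hence one
+ * resource describing four monitors.
+ */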
+ INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
+ lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
+ if (lm == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for "
+ "location monitor resource structure\n");
+ retval = -ENOMEM;
+ goto err_lm;
+ }
+ lm->parent = tsi148_bridge;
+ mutex_init(&lm->mtx);
+ lm->locked = 0;
+ lm->number = 1;
+ lm->monitors = 4;
+ list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
+
+ tsi148_bridge->slave_get = tsi148_slave_get;
+ tsi148_bridge->slave_set = tsi148_slave_set;
+ tsi148_bridge->master_get = tsi148_master_get;
+ tsi148_bridge->master_set = tsi148_master_set;
+ tsi148_bridge->master_read = tsi148_master_read;
+ tsi148_bridge->master_write = tsi148_master_write;
+ tsi148_bridge->master_rmw = tsi148_master_rmw;
+ tsi148_bridge->dma_list_add = tsi148_dma_list_add;
+ tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
+ tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
+ tsi148_bridge->irq_set = tsi148_irq_set;
+ tsi148_bridge->irq_generate = tsi148_irq_generate;
+ tsi148_bridge->lm_set = tsi148_lm_set;
+ tsi148_bridge->lm_get = tsi148_lm_get;
+ tsi148_bridge->lm_attach = tsi148_lm_attach;
+ tsi148_bridge->lm_detach = tsi148_lm_detach;
+ tsi148_bridge->slot_get = tsi148_slot_get;
+ tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
+ tsi148_bridge->free_consistent = tsi148_free_consistent;
+
+ data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
+ dev_info(&pdev->dev, "Board is%s the VME system controller\n",
+ (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
+ if (!geoid)
+ dev_info(&pdev->dev, "VME geographical address is %d\n",
+ data & TSI148_LCSR_VSTAT_GA_M);
+ else
+ dev_info(&pdev->dev, "VME geographical address is set to %d\n",
+ geoid);
+
+ dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
+ err_chk ? "enabled" : "disabled");
+
+ retval = tsi148_crcsr_init(tsi148_bridge, pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
+ goto err_crcsr;
+ }
+
+ retval = vme_register_bridge(tsi148_bridge);
+ if (retval != 0) {
+ dev_err(&pdev->dev, "Chip Registration failed.\n");
+ goto err_reg;
+ }
+
+ pci_set_drvdata(pdev, tsi148_bridge);
+
+ /* Clear VME bus "board fail", and "power-up reset" lines */
+ data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
+ data &= ~TSI148_LCSR_VSTAT_BRDFL;
+ data |= TSI148_LCSR_VSTAT_CPURST;
+ iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
+
+ return 0;
+
+err_reg:
+ tsi148_crcsr_exit(tsi148_bridge, pdev);
+err_crcsr:
+err_lm:
+ /* resources are stored in link list */
+ list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
+ lm = list_entry(pos, struct vme_lm_resource, list);
+ list_del(pos);
+ kfree(lm);
+ }
+err_dma:
+ /* resources are stored in link list */
+ list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
+ dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
+ list_del(pos);
+ kfree(dma_ctrlr);
+ }
+err_slave:
+ /* resources are stored in link list */
+ list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
+ slave_image = list_entry(pos, struct vme_slave_resource, list);
+ list_del(pos);
+ kfree(slave_image);
+ }
+err_master:
+ /* resources are stored in link list */
+ list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
+ master_image = list_entry(pos, struct vme_master_resource,
+ list);
+ list_del(pos);
+ kfree(master_image);
+ }
+
+ tsi148_irq_exit(tsi148_bridge, pdev);
+err_irq:
+err_test:
+ iounmap(tsi148_device->base);
+err_remap:
+ pci_release_regions(pdev);
+err_resource:
+ pci_disable_device(pdev);
+err_enable:
+ kfree(tsi148_device);
+err_driver:
+ kfree(tsi148_bridge);
+err_struct:
+ return retval;
+
+}
+
+static void tsi148_remove(struct pci_dev *pdev)
+{
+ struct list_head *pos = NULL;
+ struct list_head *tmplist;
+ struct vme_master_resource *master_image;
+ struct vme_slave_resource *slave_image;
+ struct vme_dma_resource *dma_ctrlr;
+ int i;
+ struct tsi148_driver *bridge;
+ struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
+
+ bridge = tsi148_bridge->driver_priv;
+
+ dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
+
+ /*
+ * Shutdown all inbound and outbound windows.
+ */
+ for (i = 0; i < 8; i++) {
+ iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
+ TSI148_LCSR_OFFSET_ITAT);
+ iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
+ TSI148_LCSR_OFFSET_OTAT);
+ }
+
+ /*
+ * Shutdown Location monitor.
+ */
+ iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
+
+ /*
+ * Shutdown CRG map.
+ */
+ iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
+
+ /*
+ * Clear error status.
+ */
+ iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
+ iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
+ iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
+
+ /*
+ * Remove VIRQ interrupt (if any)
+ */
+ if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
+ iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
+
+ /*
+ * Map all Interrupts to PCI INTA
+ */
+ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
+ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
+
+ tsi148_irq_exit(tsi148_bridge, pdev);
+
+ vme_unregister_bridge(tsi148_bridge);
+
+ tsi148_crcsr_exit(tsi148_bridge, pdev);
+
+ /* resources are stored in link list */
+ list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
+ dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
+ list_del(pos);
+ kfree(dma_ctrlr);
+ }
+
+ /* resources are stored in link list */
+ list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
+ slave_image = list_entry(pos, struct vme_slave_resource, list);
+ list_del(pos);
+ kfree(slave_image);
+ }
+
+ /* resources are stored in link list */
+ list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
+ master_image = list_entry(pos, struct vme_master_resource,
+ list);
+ list_del(pos);
+ kfree(master_image);
+ }
+
+ iounmap(bridge->base);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+
+ kfree(tsi148_bridge->driver_priv);
+
+ kfree(tsi148_bridge);
+}
+
+static void __exit tsi148_exit(void)
+{
+ pci_unregister_driver(&tsi148_driver);
+}
+
+MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
+module_param(err_chk, bool, 0);
+
+MODULE_PARM_DESC(geoid, "Override geographical addressing");
+module_param(geoid, int, 0);
+
+MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
+MODULE_LICENSE("GPL");
+
+module_init(tsi148_init);
+module_exit(tsi148_exit);
--- /dev/null
+/*
+ * tsi148.h
+ *
+ * Support for the Tundra TSI148 VME Bridge chip
+ *
+ * Author: Tom Armistead
+ * Updated and maintained by Ajit Prem
+ * Copyright 2004 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef TSI148_H
+#define TSI148_H
+
+#ifndef PCI_VENDOR_ID_TUNDRA
+#define PCI_VENDOR_ID_TUNDRA 0x10e3
+#endif
+
+#ifndef PCI_DEVICE_ID_TUNDRA_TSI148
+#define PCI_DEVICE_ID_TUNDRA_TSI148 0x148
+#endif
+
+/*
+ * Define the number of each that the Tsi148 supports.
+ */
+#define TSI148_MAX_MASTER 8 /* Max Master Windows */
+#define TSI148_MAX_SLAVE 8 /* Max Slave Windows */
+#define TSI148_MAX_DMA 2 /* Max DMA Controllers */
+#define TSI148_MAX_MAILBOX 4 /* Max Mail Box registers */
+#define TSI148_MAX_SEMAPHORE 8 /* Max Semaphores */
+
+/* Structure used to hold driver specific information */
+struct tsi148_driver {
+ void __iomem *base; /* Base Address of device registers */
+ wait_queue_head_t dma_queue[2];
+ wait_queue_head_t iack_queue;
+ void (*lm_callback[4])(int); /* Called in interrupt handler */
+ void *crcsr_kernel;
+ dma_addr_t crcsr_bus;
+ struct vme_master_resource *flush_image;
+ struct mutex vme_rmw; /* Only one RMW cycle at a time */
+ struct mutex vme_int; /*
+ * Only one VME interrupt can be
+ * generated at a time, provide locking
+ */
+};
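+
+/*
+ * One struct tsi148_driver is allocated per probed device and hung off
+ * vme_bridge->driver_priv in tsi148_probe(); tsi148_remove() fetches it
+ * back from there before tearing the device down.
+ */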
+
+/*
+ * Layout of a DMAC Linked-List Descriptor
+ *
+ * Note: This structure is accessed via the chip and therefore must be
+ * correctly laid out - It must also be aligned on 64-bit boundaries.
+ */
+struct tsi148_dma_descriptor {
+ __be32 dsau; /* Source Address */
+ __be32 dsal;
+ __be32 ddau; /* Destination Address */
+ __be32 ddal;
+ __be32 dsat; /* Source attributes */
+ __be32 ddat; /* Destination attributes */
+ __be32 dnlau; /* Next link address */
+ __be32 dnlal;
+ __be32 dcnt; /* Byte count */
+ __be32 ddbs; /* 2eSST Broadcast select */
+};
+
+struct tsi148_dma_entry {
+ /*
+ * The descriptor needs to be aligned on a 64-bit boundary, we increase
+ * the chance of this by putting it first in the structure.
+ */
+ struct tsi148_dma_descriptor descriptor;
+ struct list_head list;
+ dma_addr_t dma_handle;
+};
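+
+/*
+ * Example (sketch, assuming the usual linked-list usage of this chip):
+ * entries are chained by writing the bus address of the next entry's
+ * descriptor into dnlau/dnlal, and the final entry marks the end of the
+ * list with TSI148_LCSR_DNLAL_LLA, e.g.
+ *
+ * prev->descriptor.dnlau = cpu_to_be32(upper_32_bits(next->dma_handle));
+ * prev->descriptor.dnlal = cpu_to_be32(lower_32_bits(next->dma_handle));
+ * last->descriptor.dnlal |= cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
+ */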
+
+/*
+ * TSI148 ASIC register structure overlays and bit field definitions.
+ *
+ * Note: Tsi148 Register Group (CRG) consists of the following
+ * combination of registers:
+ * PCFS - PCI Configuration Space Registers
+ * LCSR - Local Control and Status Registers
+ * GCSR - Global Control and Status Registers
+ * CR/CSR - Subset of Configuration ROM /
+ * Control and Status Registers
+ */
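+
+/*
+ * The CRG is reached through PCI BAR 0: tsi148_probe() ioremaps the first
+ * 4 KB of that BAR and all offsets below are relative to that mapping
+ * (tsi148_device->base / bridge->base).
+ */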
+
+
+/*
+ * Command/Status Registers (CRG + $004)
+ */
+#define TSI148_PCFS_ID 0x0
+#define TSI148_PCFS_CSR 0x4
+#define TSI148_PCFS_CLASS 0x8
+#define TSI148_PCFS_MISC0 0xC
+#define TSI148_PCFS_MBARL 0x10
+#define TSI148_PCFS_MBARU 0x14
+
+#define TSI148_PCFS_SUBID 0x28
+
+#define TSI148_PCFS_CAPP 0x34
+
+#define TSI148_PCFS_MISC1 0x3C
+
+#define TSI148_PCFS_XCAPP 0x40
+#define TSI148_PCFS_XSTAT 0x44
+
+/*
+ * LCSR definitions
+ */
+
+/*
+ * Outbound Translations
+ */
+#define TSI148_LCSR_OT0_OTSAU 0x100
+#define TSI148_LCSR_OT0_OTSAL 0x104
+#define TSI148_LCSR_OT0_OTEAU 0x108
+#define TSI148_LCSR_OT0_OTEAL 0x10C
+#define TSI148_LCSR_OT0_OTOFU 0x110
+#define TSI148_LCSR_OT0_OTOFL 0x114
+#define TSI148_LCSR_OT0_OTBS 0x118
+#define TSI148_LCSR_OT0_OTAT 0x11C
+
+#define TSI148_LCSR_OT1_OTSAU 0x120
+#define TSI148_LCSR_OT1_OTSAL 0x124
+#define TSI148_LCSR_OT1_OTEAU 0x128
+#define TSI148_LCSR_OT1_OTEAL 0x12C
+#define TSI148_LCSR_OT1_OTOFU 0x130
+#define TSI148_LCSR_OT1_OTOFL 0x134
+#define TSI148_LCSR_OT1_OTBS 0x138
+#define TSI148_LCSR_OT1_OTAT 0x13C
+
+#define TSI148_LCSR_OT2_OTSAU 0x140
+#define TSI148_LCSR_OT2_OTSAL 0x144
+#define TSI148_LCSR_OT2_OTEAU 0x148
+#define TSI148_LCSR_OT2_OTEAL 0x14C
+#define TSI148_LCSR_OT2_OTOFU 0x150
+#define TSI148_LCSR_OT2_OTOFL 0x154
+#define TSI148_LCSR_OT2_OTBS 0x158
+#define TSI148_LCSR_OT2_OTAT 0x15C
+
+#define TSI148_LCSR_OT3_OTSAU 0x160
+#define TSI148_LCSR_OT3_OTSAL 0x164
+#define TSI148_LCSR_OT3_OTEAU 0x168
+#define TSI148_LCSR_OT3_OTEAL 0x16C
+#define TSI148_LCSR_OT3_OTOFU 0x170
+#define TSI148_LCSR_OT3_OTOFL 0x174
+#define TSI148_LCSR_OT3_OTBS 0x178
+#define TSI148_LCSR_OT3_OTAT 0x17C
+
+#define TSI148_LCSR_OT4_OTSAU 0x180
+#define TSI148_LCSR_OT4_OTSAL 0x184
+#define TSI148_LCSR_OT4_OTEAU 0x188
+#define TSI148_LCSR_OT4_OTEAL 0x18C
+#define TSI148_LCSR_OT4_OTOFU 0x190
+#define TSI148_LCSR_OT4_OTOFL 0x194
+#define TSI148_LCSR_OT4_OTBS 0x198
+#define TSI148_LCSR_OT4_OTAT 0x19C
+
+#define TSI148_LCSR_OT5_OTSAU 0x1A0
+#define TSI148_LCSR_OT5_OTSAL 0x1A4
+#define TSI148_LCSR_OT5_OTEAU 0x1A8
+#define TSI148_LCSR_OT5_OTEAL 0x1AC
+#define TSI148_LCSR_OT5_OTOFU 0x1B0
+#define TSI148_LCSR_OT5_OTOFL 0x1B4
+#define TSI148_LCSR_OT5_OTBS 0x1B8
+#define TSI148_LCSR_OT5_OTAT 0x1BC
+
+#define TSI148_LCSR_OT6_OTSAU 0x1C0
+#define TSI148_LCSR_OT6_OTSAL 0x1C4
+#define TSI148_LCSR_OT6_OTEAU 0x1C8
+#define TSI148_LCSR_OT6_OTEAL 0x1CC
+#define TSI148_LCSR_OT6_OTOFU 0x1D0
+#define TSI148_LCSR_OT6_OTOFL 0x1D4
+#define TSI148_LCSR_OT6_OTBS 0x1D8
+#define TSI148_LCSR_OT6_OTAT 0x1DC
+
+#define TSI148_LCSR_OT7_OTSAU 0x1E0
+#define TSI148_LCSR_OT7_OTSAL 0x1E4
+#define TSI148_LCSR_OT7_OTEAU 0x1E8
+#define TSI148_LCSR_OT7_OTEAL 0x1EC
+#define TSI148_LCSR_OT7_OTOFU 0x1F0
+#define TSI148_LCSR_OT7_OTOFL 0x1F4
+#define TSI148_LCSR_OT7_OTBS 0x1F8
+#define TSI148_LCSR_OT7_OTAT 0x1FC
+
+#define TSI148_LCSR_OT0 0x100
+#define TSI148_LCSR_OT1 0x120
+#define TSI148_LCSR_OT2 0x140
+#define TSI148_LCSR_OT3 0x160
+#define TSI148_LCSR_OT4 0x180
+#define TSI148_LCSR_OT5 0x1A0
+#define TSI148_LCSR_OT6 0x1C0
+#define TSI148_LCSR_OT7 0x1E0
+
+static const int TSI148_LCSR_OT[8] = { TSI148_LCSR_OT0, TSI148_LCSR_OT1,
+ TSI148_LCSR_OT2, TSI148_LCSR_OT3,
+ TSI148_LCSR_OT4, TSI148_LCSR_OT5,
+ TSI148_LCSR_OT6, TSI148_LCSR_OT7 };
+
+#define TSI148_LCSR_OFFSET_OTSAU 0x0
+#define TSI148_LCSR_OFFSET_OTSAL 0x4
+#define TSI148_LCSR_OFFSET_OTEAU 0x8
+#define TSI148_LCSR_OFFSET_OTEAL 0xC
+#define TSI148_LCSR_OFFSET_OTOFU 0x10
+#define TSI148_LCSR_OFFSET_OTOFL 0x14
+#define TSI148_LCSR_OFFSET_OTBS 0x18
+#define TSI148_LCSR_OFFSET_OTAT 0x1C
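+
+/*
+ * A given outbound window register is addressed as
+ * base + TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_xxx, e.g.
+ * TSI148_LCSR_OT[2] + TSI148_LCSR_OFFSET_OTAT == TSI148_LCSR_OT2_OTAT (0x15C).
+ */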
+
+/*
+ * VMEbus interrupt ack
+ * offset 200
+ */
+#define TSI148_LCSR_VIACK1 0x204
+#define TSI148_LCSR_VIACK2 0x208
+#define TSI148_LCSR_VIACK3 0x20C
+#define TSI148_LCSR_VIACK4 0x210
+#define TSI148_LCSR_VIACK5 0x214
+#define TSI148_LCSR_VIACK6 0x218
+#define TSI148_LCSR_VIACK7 0x21C
+
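+/* Index 0 is unused: VMEbus interrupt levels run from 1 to 7. */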
+static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1,
+ TSI148_LCSR_VIACK2, TSI148_LCSR_VIACK3,
+ TSI148_LCSR_VIACK4, TSI148_LCSR_VIACK5,
+ TSI148_LCSR_VIACK6, TSI148_LCSR_VIACK7 };
+
+/*
+ * RMW
+ * offset 220
+ */
+#define TSI148_LCSR_RMWAU 0x220
+#define TSI148_LCSR_RMWAL 0x224
+#define TSI148_LCSR_RMWEN 0x228
+#define TSI148_LCSR_RMWC 0x22C
+#define TSI148_LCSR_RMWS 0x230
+
+/*
+ * VMEbus control
+ * offset 234
+ */
+#define TSI148_LCSR_VMCTRL 0x234
+#define TSI148_LCSR_VCTRL 0x238
+#define TSI148_LCSR_VSTAT 0x23C
+
+/*
+ * PCI status
+ * offset 240
+ */
+#define TSI148_LCSR_PSTAT 0x240
+
+/*
+ * VME filter.
+ * offset 250
+ */
+#define TSI148_LCSR_VMEFL 0x250
+
+ /*
+ * VME exception.
+ * offset 260
+ */
+#define TSI148_LCSR_VEAU 0x260
+#define TSI148_LCSR_VEAL 0x264
+#define TSI148_LCSR_VEAT 0x268
+
+ /*
+ * PCI error
+ * offset 270
+ */
+#define TSI148_LCSR_EDPAU 0x270
+#define TSI148_LCSR_EDPAL 0x274
+#define TSI148_LCSR_EDPXA 0x278
+#define TSI148_LCSR_EDPXS 0x27C
+#define TSI148_LCSR_EDPAT 0x280
+
+ /*
+ * Inbound Translations
+ * offset 300
+ */
+#define TSI148_LCSR_IT0_ITSAU 0x300
+#define TSI148_LCSR_IT0_ITSAL 0x304
+#define TSI148_LCSR_IT0_ITEAU 0x308
+#define TSI148_LCSR_IT0_ITEAL 0x30C
+#define TSI148_LCSR_IT0_ITOFU 0x310
+#define TSI148_LCSR_IT0_ITOFL 0x314
+#define TSI148_LCSR_IT0_ITAT 0x318
+
+#define TSI148_LCSR_IT1_ITSAU 0x320
+#define TSI148_LCSR_IT1_ITSAL 0x324
+#define TSI148_LCSR_IT1_ITEAU 0x328
+#define TSI148_LCSR_IT1_ITEAL 0x32C
+#define TSI148_LCSR_IT1_ITOFU 0x330
+#define TSI148_LCSR_IT1_ITOFL 0x334
+#define TSI148_LCSR_IT1_ITAT 0x338
+
+#define TSI148_LCSR_IT2_ITSAU 0x340
+#define TSI148_LCSR_IT2_ITSAL 0x344
+#define TSI148_LCSR_IT2_ITEAU 0x348
+#define TSI148_LCSR_IT2_ITEAL 0x34C
+#define TSI148_LCSR_IT2_ITOFU 0x350
+#define TSI148_LCSR_IT2_ITOFL 0x354
+#define TSI148_LCSR_IT2_ITAT 0x358
+
+#define TSI148_LCSR_IT3_ITSAU 0x360
+#define TSI148_LCSR_IT3_ITSAL 0x364
+#define TSI148_LCSR_IT3_ITEAU 0x368
+#define TSI148_LCSR_IT3_ITEAL 0x36C
+#define TSI148_LCSR_IT3_ITOFU 0x370
+#define TSI148_LCSR_IT3_ITOFL 0x374
+#define TSI148_LCSR_IT3_ITAT 0x378
+
+#define TSI148_LCSR_IT4_ITSAU 0x380
+#define TSI148_LCSR_IT4_ITSAL 0x384
+#define TSI148_LCSR_IT4_ITEAU 0x388
+#define TSI148_LCSR_IT4_ITEAL 0x38C
+#define TSI148_LCSR_IT4_ITOFU 0x390
+#define TSI148_LCSR_IT4_ITOFL 0x394
+#define TSI148_LCSR_IT4_ITAT 0x398
+
+#define TSI148_LCSR_IT5_ITSAU 0x3A0
+#define TSI148_LCSR_IT5_ITSAL 0x3A4
+#define TSI148_LCSR_IT5_ITEAU 0x3A8
+#define TSI148_LCSR_IT5_ITEAL 0x3AC
+#define TSI148_LCSR_IT5_ITOFU 0x3B0
+#define TSI148_LCSR_IT5_ITOFL 0x3B4
+#define TSI148_LCSR_IT5_ITAT 0x3B8
+
+#define TSI148_LCSR_IT6_ITSAU 0x3C0
+#define TSI148_LCSR_IT6_ITSAL 0x3C4
+#define TSI148_LCSR_IT6_ITEAU 0x3C8
+#define TSI148_LCSR_IT6_ITEAL 0x3CC
+#define TSI148_LCSR_IT6_ITOFU 0x3D0
+#define TSI148_LCSR_IT6_ITOFL 0x3D4
+#define TSI148_LCSR_IT6_ITAT 0x3D8
+
+#define TSI148_LCSR_IT7_ITSAU 0x3E0
+#define TSI148_LCSR_IT7_ITSAL 0x3E4
+#define TSI148_LCSR_IT7_ITEAU 0x3E8
+#define TSI148_LCSR_IT7_ITEAL 0x3EC
+#define TSI148_LCSR_IT7_ITOFU 0x3F0
+#define TSI148_LCSR_IT7_ITOFL 0x3F4
+#define TSI148_LCSR_IT7_ITAT 0x3F8
+
+
+#define TSI148_LCSR_IT0 0x300
+#define TSI148_LCSR_IT1 0x320
+#define TSI148_LCSR_IT2 0x340
+#define TSI148_LCSR_IT3 0x360
+#define TSI148_LCSR_IT4 0x380
+#define TSI148_LCSR_IT5 0x3A0
+#define TSI148_LCSR_IT6 0x3C0
+#define TSI148_LCSR_IT7 0x3E0
+
+static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1,
+ TSI148_LCSR_IT2, TSI148_LCSR_IT3,
+ TSI148_LCSR_IT4, TSI148_LCSR_IT5,
+ TSI148_LCSR_IT6, TSI148_LCSR_IT7 };
+
+#define TSI148_LCSR_OFFSET_ITSAU 0x0
+#define TSI148_LCSR_OFFSET_ITSAL 0x4
+#define TSI148_LCSR_OFFSET_ITEAU 0x8
+#define TSI148_LCSR_OFFSET_ITEAL 0xC
+#define TSI148_LCSR_OFFSET_ITOFU 0x10
+#define TSI148_LCSR_OFFSET_ITOFL 0x14
+#define TSI148_LCSR_OFFSET_ITAT 0x18
+
+ /*
+ * Inbound Translation GCSR
+ * offset 400
+ */
+#define TSI148_LCSR_GBAU 0x400
+#define TSI148_LCSR_GBAL 0x404
+#define TSI148_LCSR_GCSRAT 0x408
+
+ /*
+ * Inbound Translation CRG
+ * offset 40C
+ */
+#define TSI148_LCSR_CBAU 0x40C
+#define TSI148_LCSR_CBAL 0x410
+#define TSI148_LCSR_CSRAT 0x414
+
+ /*
+ * Inbound Translation CR/CSR
+ * CRG
+ * offset 418
+ */
+#define TSI148_LCSR_CROU 0x418
+#define TSI148_LCSR_CROL 0x41C
+#define TSI148_LCSR_CRAT 0x420
+
+ /*
+ * Inbound Translation Location Monitor
+ * offset 424
+ */
+#define TSI148_LCSR_LMBAU 0x424
+#define TSI148_LCSR_LMBAL 0x428
+#define TSI148_LCSR_LMAT 0x42C
+
+ /*
+ * VMEbus Interrupt Control.
+ * offset 430
+ */
+#define TSI148_LCSR_BCU 0x430
+#define TSI148_LCSR_BCL 0x434
+#define TSI148_LCSR_BPGTR 0x438
+#define TSI148_LCSR_BPCTR 0x43C
+#define TSI148_LCSR_VICR 0x440
+
+ /*
+ * Local Bus Interrupt Control.
+ * offset 448
+ */
+#define TSI148_LCSR_INTEN 0x448
+#define TSI148_LCSR_INTEO 0x44C
+#define TSI148_LCSR_INTS 0x450
+#define TSI148_LCSR_INTC 0x454
+#define TSI148_LCSR_INTM1 0x458
+#define TSI148_LCSR_INTM2 0x45C
+
+ /*
+ * DMA Controllers
+ * offset 500
+ */
+#define TSI148_LCSR_DCTL0 0x500
+#define TSI148_LCSR_DSTA0 0x504
+#define TSI148_LCSR_DCSAU0 0x508
+#define TSI148_LCSR_DCSAL0 0x50C
+#define TSI148_LCSR_DCDAU0 0x510
+#define TSI148_LCSR_DCDAL0 0x514
+#define TSI148_LCSR_DCLAU0 0x518
+#define TSI148_LCSR_DCLAL0 0x51C
+#define TSI148_LCSR_DSAU0 0x520
+#define TSI148_LCSR_DSAL0 0x524
+#define TSI148_LCSR_DDAU0 0x528
+#define TSI148_LCSR_DDAL0 0x52C
+#define TSI148_LCSR_DSAT0 0x530
+#define TSI148_LCSR_DDAT0 0x534
+#define TSI148_LCSR_DNLAU0 0x538
+#define TSI148_LCSR_DNLAL0 0x53C
+#define TSI148_LCSR_DCNT0 0x540
+#define TSI148_LCSR_DDBS0 0x544
+
+#define TSI148_LCSR_DCTL1 0x580
+#define TSI148_LCSR_DSTA1 0x584
+#define TSI148_LCSR_DCSAU1 0x588
+#define TSI148_LCSR_DCSAL1 0x58C
+#define TSI148_LCSR_DCDAU1 0x590
+#define TSI148_LCSR_DCDAL1 0x594
+#define TSI148_LCSR_DCLAU1 0x598
+#define TSI148_LCSR_DCLAL1 0x59C
+#define TSI148_LCSR_DSAU1 0x5A0
+#define TSI148_LCSR_DSAL1 0x5A4
+#define TSI148_LCSR_DDAU1 0x5A8
+#define TSI148_LCSR_DDAL1 0x5AC
+#define TSI148_LCSR_DSAT1 0x5B0
+#define TSI148_LCSR_DDAT1 0x5B4
+#define TSI148_LCSR_DNLAU1 0x5B8
+#define TSI148_LCSR_DNLAL1 0x5BC
+#define TSI148_LCSR_DCNT1 0x5C0
+#define TSI148_LCSR_DDBS1 0x5C4
+
+#define TSI148_LCSR_DMA0 0x500
+#define TSI148_LCSR_DMA1 0x580
+
+
+static const int TSI148_LCSR_DMA[TSI148_MAX_DMA] = { TSI148_LCSR_DMA0,
+ TSI148_LCSR_DMA1 };
+
+#define TSI148_LCSR_OFFSET_DCTL 0x0
+#define TSI148_LCSR_OFFSET_DSTA 0x4
+#define TSI148_LCSR_OFFSET_DCSAU 0x8
+#define TSI148_LCSR_OFFSET_DCSAL 0xC
+#define TSI148_LCSR_OFFSET_DCDAU 0x10
+#define TSI148_LCSR_OFFSET_DCDAL 0x14
+#define TSI148_LCSR_OFFSET_DCLAU 0x18
+#define TSI148_LCSR_OFFSET_DCLAL 0x1C
+#define TSI148_LCSR_OFFSET_DSAU 0x20
+#define TSI148_LCSR_OFFSET_DSAL 0x24
+#define TSI148_LCSR_OFFSET_DDAU 0x28
+#define TSI148_LCSR_OFFSET_DDAL 0x2C
+#define TSI148_LCSR_OFFSET_DSAT 0x30
+#define TSI148_LCSR_OFFSET_DDAT 0x34
+#define TSI148_LCSR_OFFSET_DNLAU 0x38
+#define TSI148_LCSR_OFFSET_DNLAL 0x3C
+#define TSI148_LCSR_OFFSET_DCNT 0x40
+#define TSI148_LCSR_OFFSET_DDBS 0x44
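+
+/*
+ * Per-channel DMA registers are addressed as
+ * base + TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_xxx, e.g.
+ * TSI148_LCSR_DMA[1] + TSI148_LCSR_OFFSET_DSTA == TSI148_LCSR_DSTA1 (0x584).
+ */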
+
+ /*
+ * GCSR Register Group
+ */
+
+ /*
+ * GCSR CRG
+ * offset 00 600 - DEVI/VENI
+ * offset 04 604 - CTRL/GA/REVID
+ * offset 08 608 - Semaphore3/2/1/0
+ * offset 0C 60C - Semaphore7/6/5/4
+ */
+#define TSI148_GCSR_ID 0x600
+#define TSI148_GCSR_CSR 0x604
+#define TSI148_GCSR_SEMA0 0x608
+#define TSI148_GCSR_SEMA1 0x60C
+
+ /*
+ * Mail Box
+ * GCSR CRG
+ * offset 10 610 - Mailbox0
+ */
+#define TSI148_GCSR_MBOX0 0x610
+#define TSI148_GCSR_MBOX1 0x614
+#define TSI148_GCSR_MBOX2 0x618
+#define TSI148_GCSR_MBOX3 0x61C
+
+static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
+ TSI148_GCSR_MBOX1,
+ TSI148_GCSR_MBOX2,
+ TSI148_GCSR_MBOX3 };
+
+ /*
+ * CR/CSR
+ */
+
+ /*
+ * CR/CSR CRG
+ * offset 7FFF4 FF4 - CSRBCR
+ * offset 7FFF8 FF8 - CSRBSR
+ * offset 7FFFC FFC - CBAR
+ */
+#define TSI148_CSRBCR 0xFF4
+#define TSI148_CSRBSR 0xFF8
+#define TSI148_CBAR 0xFFC
+
+
+
+
+ /*
+ * TSI148 Register Bit Definitions
+ */
+
+ /*
+ * PFCS Register Set
+ */
+#define TSI148_PCFS_CMMD_SERR (1<<8) /* SERR_L out pin sys err */
+#define TSI148_PCFS_CMMD_PERR (1<<6) /* PERR_L out pin parity */
+#define TSI148_PCFS_CMMD_MSTR (1<<2) /* PCI bus master */
+#define TSI148_PCFS_CMMD_MEMSP (1<<1) /* PCI mem space access */
+#define TSI148_PCFS_CMMD_IOSP (1<<0) /* PCI I/O space enable */
+
+#define TSI148_PCFS_STAT_RCPVE (1<<15) /* Detected Parity Error */
+#define TSI148_PCFS_STAT_SIGSE (1<<14) /* Signalled System Error */
+#define TSI148_PCFS_STAT_RCVMA (1<<13) /* Received Master Abort */
+#define TSI148_PCFS_STAT_RCVTA (1<<12) /* Received Target Abort */
+#define TSI148_PCFS_STAT_SIGTA (1<<11) /* Signalled Target Abort */
+#define TSI148_PCFS_STAT_SELTIM (3<<9) /* DEVSEL Timing */
+#define TSI148_PCFS_STAT_DPAR (1<<8) /* Data Parity Err Reported */
+#define TSI148_PCFS_STAT_FAST (1<<7) /* Fast back-to-back Cap */
+#define TSI148_PCFS_STAT_P66M (1<<5) /* 66 MHz Capable */
+#define TSI148_PCFS_STAT_CAPL (1<<4) /* Capab List - address $34 */
+
+/*
+ * Revision ID/Class Code Registers (CRG +$008)
+ */
+#define TSI148_PCFS_CLAS_M (0xFF<<24) /* Class ID */
+#define TSI148_PCFS_SUBCLAS_M (0xFF<<16) /* Sub-Class ID */
+#define TSI148_PCFS_PROGIF_M (0xFF<<8) /* Programming Interface */
+#define TSI148_PCFS_REVID_M (0xFF<<0) /* Rev ID */
+
+/*
+ * Cache Line Size/ Master Latency Timer/ Header Type Registers (CRG + $00C)
+ */
+#define TSI148_PCFS_HEAD_M (0xFF<<16) /* Header Type */
+#define TSI148_PCFS_MLAT_M (0xFF<<8) /* Master Lat Timer */
+#define TSI148_PCFS_CLSZ_M (0xFF<<0) /* Cache Line Size */
+
+/*
+ * Memory Base Address Lower Reg (CRG + $010)
+ */
+#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */
+#define TSI148_PCFS_MBARL_PRE (1<<3) /* Prefetch */
+#define TSI148_PCFS_MBARL_MTYPE_M (3<<1) /* Memory Type Mask */
+#define TSI148_PCFS_MBARL_IOMEM (1<<0) /* I/O Space Indicator */
+
+/*
+ * Message Signaled Interrupt Capabilities Register (CRG + $040)
+ */
+#define TSI148_PCFS_MSICAP_64BAC (1<<7) /* 64-bit Address Capable */
+#define TSI148_PCFS_MSICAP_MME_M (7<<4) /* Multiple Msg Enable Mask */
+#define TSI148_PCFS_MSICAP_MMC_M (7<<1) /* Multiple Msg Capable Mask */
+#define TSI148_PCFS_MSICAP_MSIEN (1<<0) /* Msg signaled INT Enable */
+
+/*
+ * Message Address Lower Register (CRG +$044)
+ */
+#define TSI148_PCFS_MSIAL_M (0x3FFFFFFF<<2) /* Mask */
+
+/*
+ * Message Data Register (CRG + 4C)
+ */
+#define TSI148_PCFS_MSIMD_M (0xFFFF<<0) /* Mask */
+
+/*
+ * PCI-X Capabilities Register (CRG + $050)
+ */
+#define TSI148_PCFS_PCIXCAP_MOST_M (7<<4) /* Max outstanding Split Tran */
+#define TSI148_PCFS_PCIXCAP_MMRBC_M (3<<2) /* Max Mem Read byte cnt */
+#define TSI148_PCFS_PCIXCAP_ERO (1<<1) /* Enable Relaxed Ordering */
+#define TSI148_PCFS_PCIXCAP_DPERE (1<<0) /* Data Parity Recover Enable */
+
+/*
+ * PCI-X Status Register (CRG +$054)
+ */
+#define TSI148_PCFS_PCIXSTAT_RSCEM (1<<29) /* Received Split Comp Error */
+#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7<<26) /* max Cumulative Read Size */
+#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans
+ */
+#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3<<21) /* max mem read byte count */
+#define TSI148_PCFS_PCIXSTAT_DC (1<<20) /* Device Complexity */
+#define TSI148_PCFS_PCIXSTAT_USC (1<<19) /* Unexpected Split comp */
+#define TSI148_PCFS_PCIXSTAT_SCD (1<<18) /* Split completion discard */
+#define TSI148_PCFS_PCIXSTAT_133C (1<<17) /* 133MHz capable */
+#define TSI148_PCFS_PCIXSTAT_64D (1<<16) /* 64 bit device */
+#define TSI148_PCFS_PCIXSTAT_BN_M (0xFF<<8) /* Bus number */
+#define TSI148_PCFS_PCIXSTAT_DN_M (0x1F<<3) /* Device number */
+#define TSI148_PCFS_PCIXSTAT_FN_M (7<<0) /* Function Number */
+
+/*
+ * LCSR Registers
+ */
+
+/*
+ * Outbound Translation Starting Address Lower
+ */
+#define TSI148_LCSR_OTSAL_M (0xFFFF<<16) /* Mask */
+
+/*
+ * Outbound Translation Ending Address Lower
+ */
+#define TSI148_LCSR_OTEAL_M (0xFFFF<<16) /* Mask */
+
+/*
+ * Outbound Translation Offset Lower
+ */
+#define TSI148_LCSR_OTOFFL_M (0xFFFF<<16) /* Mask */
+
+/*
+ * Outbound Translation 2eSST Broadcast Select
+ */
+#define TSI148_LCSR_OTBS_M (0xFFFFF<<0) /* Mask */
+
+/*
+ * Outbound Translation Attribute
+ */
+#define TSI148_LCSR_OTAT_EN (1<<31) /* Window Enable */
+#define TSI148_LCSR_OTAT_MRPFD (1<<18) /* Prefetch Disable */
+
+#define TSI148_LCSR_OTAT_PFS_M (3<<16) /* Prefetch Size Mask */
+#define TSI148_LCSR_OTAT_PFS_2 (0<<16) /* 2 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_4 (1<<16) /* 4 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_8 (2<<16) /* 8 Cache Lines P Size */
+#define TSI148_LCSR_OTAT_PFS_16 (3<<16) /* 16 Cache Lines P Size */
+
+#define TSI148_LCSR_OTAT_2eSSTM_M (7<<11) /* 2eSST Xfer Rate Mask */
+#define TSI148_LCSR_OTAT_2eSSTM_160 (0<<11) /* 160MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_OTAT_2eSSTM_267 (1<<11) /* 267MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_OTAT_2eSSTM_320 (2<<11) /* 320MB/s 2eSST Xfer Rate */
+
+#define TSI148_LCSR_OTAT_TM_M (7<<8) /* Xfer Protocol Mask */
+#define TSI148_LCSR_OTAT_TM_SCT (0<<8) /* SCT Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_BLT (1<<8) /* BLT Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_MBLT (2<<8) /* MBLT Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eVME (3<<8) /* 2eVME Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eSST (4<<8) /* 2eSST Xfer Protocol */
+#define TSI148_LCSR_OTAT_TM_2eSSTB (5<<8) /* 2eSST Bcast Xfer Protocol */
+
+#define TSI148_LCSR_OTAT_DBW_M (3<<6) /* Max Data Width */
+#define TSI148_LCSR_OTAT_DBW_16 (0<<6) /* 16-bit Data Width */
+#define TSI148_LCSR_OTAT_DBW_32 (1<<6) /* 32-bit Data Width */
+
+#define TSI148_LCSR_OTAT_SUP (1<<5) /* Supervisory Access */
+#define TSI148_LCSR_OTAT_PGM (1<<4) /* Program Access */
+
+#define TSI148_LCSR_OTAT_AMODE_M (0xf<<0) /* Address Mode Mask */
+#define TSI148_LCSR_OTAT_AMODE_A16 (0<<0) /* A16 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_A24 (1<<0) /* A24 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_A32 (2<<0) /* A32 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_A64 (4<<0) /* A64 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_CRCSR (5<<0) /* CR/CSR Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER1 (8<<0) /* User1 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER2 (9<<0) /* User2 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER3 (10<<0) /* User3 Address Space */
+#define TSI148_LCSR_OTAT_AMODE_USER4 (11<<0) /* User4 Address Space */
+
+/*
+ * VME Master Control Register CRG+$234
+ */
+#define TSI148_LCSR_VMCTRL_VSA (1<<27) /* VMEbus Stop Ack */
+#define TSI148_LCSR_VMCTRL_VS (1<<26) /* VMEbus Stop */
+#define TSI148_LCSR_VMCTRL_DHB (1<<25) /* Device Has Bus */
+#define TSI148_LCSR_VMCTRL_DWB (1<<24) /* Device Wants Bus */
+
+#define TSI148_LCSR_VMCTRL_RMWEN (1<<20) /* RMW Enable */
+
+#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask
+ */
+#define TSI148_LCSR_VMCTRL_ATO_32 (0<<16) /* 32 us */
+#define TSI148_LCSR_VMCTRL_ATO_128 (1<<16) /* 128 us */
+#define TSI148_LCSR_VMCTRL_ATO_512 (2<<16) /* 512 us */
+#define TSI148_LCSR_VMCTRL_ATO_2M (3<<16) /* 2 ms */
+#define TSI148_LCSR_VMCTRL_ATO_8M (4<<16) /* 8 ms */
+#define TSI148_LCSR_VMCTRL_ATO_32M (5<<16) /* 32 ms */
+#define TSI148_LCSR_VMCTRL_ATO_128M (6<<16) /* 128 ms */
+#define TSI148_LCSR_VMCTRL_ATO_DIS (7<<16) /* Disabled */
+
+#define TSI148_LCSR_VMCTRL_VTOFF_M (7<<12) /* VMEbus Master Time off */
+#define TSI148_LCSR_VMCTRL_VTOFF_0 (0<<12) /* 0us */
+#define TSI148_LCSR_VMCTRL_VTOFF_1 (1<<12) /* 1us */
+#define TSI148_LCSR_VMCTRL_VTOFF_2 (2<<12) /* 2us */
+#define TSI148_LCSR_VMCTRL_VTOFF_4 (3<<12) /* 4us */
+#define TSI148_LCSR_VMCTRL_VTOFF_8 (4<<12) /* 8us */
+#define TSI148_LCSR_VMCTRL_VTOFF_16 (5<<12) /* 16us */
+#define TSI148_LCSR_VMCTRL_VTOFF_32 (6<<12) /* 32us */
+#define TSI148_LCSR_VMCTRL_VTOFF_64 (7<<12) /* 64us */
+
+#define TSI148_LCSR_VMCTRL_VTON_M (7<<8) /* VMEbus Master Time On */
+#define TSI148_LCSR_VMCTRL_VTON_4 (0<<8) /* 4us */
+#define TSI148_LCSR_VMCTRL_VTON_8 (1<<8) /* 8us */
+#define TSI148_LCSR_VMCTRL_VTON_16 (2<<8) /* 16us */
+#define TSI148_LCSR_VMCTRL_VTON_32 (3<<8) /* 32us */
+#define TSI148_LCSR_VMCTRL_VTON_64 (4<<8) /* 64us */
+#define TSI148_LCSR_VMCTRL_VTON_128 (5<<8) /* 128us */
+#define TSI148_LCSR_VMCTRL_VTON_256 (6<<8) /* 256us */
+#define TSI148_LCSR_VMCTRL_VTON_512 (7<<8) /* 512us */
+
+#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask
+ */
+#define TSI148_LCSR_VMCTRL_VREL_T_D (0<<3) /* Time on or Done */
+#define TSI148_LCSR_VMCTRL_VREL_T_R_D (1<<3) /* Time on and REQ or Done */
+#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2<<3) /* Time on and BCLR or Done */
+#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3<<3) /* Time on or Done and REQ */
+
+#define TSI148_LCSR_VMCTRL_VFAIR (1<<2) /* VMEbus Master Fair Mode */
+#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask
+ */
+
+/*
+ * VMEbus Control Register CRG+$238
+ */
+#define TSI148_LCSR_VCTRL_LRE (1<<31) /* Late Retry Enable */
+
+#define TSI148_LCSR_VCTRL_DLT_M (0xF<<24) /* Deadlock Timer */
+#define TSI148_LCSR_VCTRL_DLT_OFF (0<<24) /* Deadlock Timer Off */
+#define TSI148_LCSR_VCTRL_DLT_16 (1<<24) /* 16 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_32 (2<<24) /* 32 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_64 (3<<24) /* 64 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_128 (4<<24) /* 128 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_256 (5<<24) /* 256 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_512 (6<<24) /* 512 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_1024 (7<<24) /* 1024 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_2048 (8<<24) /* 2048 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_4096 (9<<24) /* 4096 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_8192 (0xA<<24) /* 8192 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_16384 (0xB<<24) /* 16384 VCLKS */
+#define TSI148_LCSR_VCTRL_DLT_32768 (0xC<<24) /* 32768 VCLKS */
+
+#define TSI148_LCSR_VCTRL_NERBB (1<<20) /* No Early Release of Bus Busy
+ */
+
+#define TSI148_LCSR_VCTRL_SRESET (1<<17) /* System Reset */
+#define TSI148_LCSR_VCTRL_LRESET (1<<16) /* Local Reset */
+
+#define TSI148_LCSR_VCTRL_SFAILAI (1<<15) /* SYSFAIL Auto Slot ID */
+#define TSI148_LCSR_VCTRL_BID_M (0x1F<<8) /* Broadcast ID Mask */
+
+#define TSI148_LCSR_VCTRL_ATOEN (1<<7) /* Arbiter Time-out Enable */
+#define TSI148_LCSR_VCTRL_ROBIN (1<<6) /* VMEbus Round Robin */
+
+#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask
+ */
+#define TSI148_LCSR_VCTRL_GTO_8 (0<<0) /* 8 us */
+#define TSI148_LCSR_VCTRL_GTO_16 (1<<0) /* 16 us */
+#define TSI148_LCSR_VCTRL_GTO_32 (2<<0) /* 32 us */
+#define TSI148_LCSR_VCTRL_GTO_64 (3<<0) /* 64 us */
+#define TSI148_LCSR_VCTRL_GTO_128 (4<<0) /* 128 us */
+#define TSI148_LCSR_VCTRL_GTO_256 (5<<0) /* 256 us */
+#define TSI148_LCSR_VCTRL_GTO_512 (6<<0) /* 512 us */
+#define TSI148_LCSR_VCTRL_GTO_DIS (7<<0) /* Disabled */
+
+/*
+ * VMEbus Status Register CRG + $23C
+ */
+#define TSI148_LCSR_VSTAT_CPURST (1<<15) /* Clear power up reset */
+#define TSI148_LCSR_VSTAT_BRDFL (1<<14) /* Board fail */
+#define TSI148_LCSR_VSTAT_PURSTS (1<<12) /* Power up reset status */
+#define TSI148_LCSR_VSTAT_BDFAILS (1<<11) /* Board Fail Status */
+#define TSI148_LCSR_VSTAT_SYSFAILS (1<<10) /* System Fail Status */
+#define TSI148_LCSR_VSTAT_ACFAILS (1<<9) /* AC fail status */
+#define TSI148_LCSR_VSTAT_SCONS (1<<8) /* System Cont Status */
+#define TSI148_LCSR_VSTAT_GAP (1<<5) /* Geographic Addr Parity */
+#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */
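+
+/*
+ * tsi148_probe() reads VSTAT to report system controller status (SCONS) and
+ * the geographical address (GA_M), and finally clears BRDFL and writes
+ * CPURST to acknowledge the power-up reset.
+ */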
+
+/*
+ * PCI Configuration Status Register CRG+$240
+ */
+#define TSI148_LCSR_PSTAT_REQ64S (1<<6) /* Request 64 status set */
+#define TSI148_LCSR_PSTAT_M66ENS (1<<5) /* M66ENS 66 MHz enable */
+#define TSI148_LCSR_PSTAT_FRAMES (1<<4) /* Frame Status */
+#define TSI148_LCSR_PSTAT_IRDYS (1<<3) /* IRDY status */
+#define TSI148_LCSR_PSTAT_DEVSELS (1<<2) /* DEVSEL status */
+#define TSI148_LCSR_PSTAT_STOPS (1<<1) /* STOP status */
+#define TSI148_LCSR_PSTAT_TRDYS (1<<0) /* TRDY status */
+
+/*
+ * VMEbus Exception Attributes Register CRG + $268
+ */
+#define TSI148_LCSR_VEAT_VES (1<<31) /* Status */
+#define TSI148_LCSR_VEAT_VEOF (1<<30) /* Overflow */
+#define TSI148_LCSR_VEAT_VESCL (1<<29) /* Status Clear */
+#define TSI148_LCSR_VEAT_2EOT (1<<21) /* 2e Odd Termination */
+#define TSI148_LCSR_VEAT_2EST (1<<20) /* 2e Slave terminated */
+#define TSI148_LCSR_VEAT_BERR (1<<19) /* Bus Error */
+#define TSI148_LCSR_VEAT_LWORD (1<<18) /* LWORD_ signal state */
+#define TSI148_LCSR_VEAT_WRITE (1<<17) /* WRITE_ signal state */
+#define TSI148_LCSR_VEAT_IACK (1<<16) /* IACK_ signal state */
+#define TSI148_LCSR_VEAT_DS1 (1<<15) /* DS1_ signal state */
+#define TSI148_LCSR_VEAT_DS0 (1<<14) /* DS0_ signal state */
+#define TSI148_LCSR_VEAT_AM_M (0x3F<<8) /* Address Mode Mask */
+#define TSI148_LCSR_VEAT_XAM_M (0xFF<<0) /* Master AMode Mask */
+
+
+/*
+ * VMEbus PCI Error Diagnostics PCI/X Attributes Register CRG + $280
+ */
+#define TSI148_LCSR_EDPAT_EDPCL (1<<29)
+
+/*
+ * Inbound Translation Starting Address Lower
+ */
+#define TSI148_LCSR_ITSAL6432_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_ITSAL24_M (0x00FFF<<12) /* Mask */
+#define TSI148_LCSR_ITSAL16_M (0x0000FFF<<4) /* Mask */
+
+/*
+ * Inbound Translation Ending Address Lower
+ */
+#define TSI148_LCSR_ITEAL6432_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_ITEAL24_M (0x00FFF<<12) /* Mask */
+#define TSI148_LCSR_ITEAL16_M (0x0000FFF<<4) /* Mask */
+
+/*
+ * Inbound Translation Offset Lower
+ */
+#define TSI148_LCSR_ITOFFL6432_M (0xFFFF<<16) /* Mask */
+#define TSI148_LCSR_ITOFFL24_M (0xFFFFF<<12) /* Mask */
+#define TSI148_LCSR_ITOFFL16_M (0xFFFFFFF<<4) /* Mask */
+
+/*
+ * Inbound Translation Attribute
+ */
+#define TSI148_LCSR_ITAT_EN (1<<31) /* Window Enable */
+#define TSI148_LCSR_ITAT_TH (1<<18) /* Prefetch Threshold */
+
+#define TSI148_LCSR_ITAT_VFS_M (3<<16) /* Virtual FIFO Size Mask */
+#define TSI148_LCSR_ITAT_VFS_64 (0<<16) /* 64 bytes Virtual FIFO Size */
+#define TSI148_LCSR_ITAT_VFS_128 (1<<16) /* 128 bytes Virtual FIFO Sz */
+#define TSI148_LCSR_ITAT_VFS_256 (2<<16) /* 256 bytes Virtual FIFO Sz */
+#define TSI148_LCSR_ITAT_VFS_512 (3<<16) /* 512 bytes Virtual FIFO Sz */
+
+#define TSI148_LCSR_ITAT_2eSSTM_M (7<<12) /* 2eSST Xfer Rate Mask */
+#define TSI148_LCSR_ITAT_2eSSTM_160 (0<<12) /* 160MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_ITAT_2eSSTM_267 (1<<12) /* 267MB/s 2eSST Xfer Rate */
+#define TSI148_LCSR_ITAT_2eSSTM_320 (2<<12) /* 320MB/s 2eSST Xfer Rate */
+
+#define TSI148_LCSR_ITAT_2eSSTB (1<<11) /* 2eSST Bcast Xfer Protocol */
+#define TSI148_LCSR_ITAT_2eSST (1<<10) /* 2eSST Xfer Protocol */
+#define TSI148_LCSR_ITAT_2eVME (1<<9) /* 2eVME Xfer Protocol */
+#define TSI148_LCSR_ITAT_MBLT (1<<8) /* MBLT Xfer Protocol */
+#define TSI148_LCSR_ITAT_BLT (1<<7) /* BLT Xfer Protocol */
+
+#define TSI148_LCSR_ITAT_AS_M (7<<4) /* Address Space Mask */
+#define TSI148_LCSR_ITAT_AS_A16 (0<<4) /* A16 Address Space */
+#define TSI148_LCSR_ITAT_AS_A24 (1<<4) /* A24 Address Space */
+#define TSI148_LCSR_ITAT_AS_A32 (2<<4) /* A32 Address Space */
+#define TSI148_LCSR_ITAT_AS_A64 (4<<4) /* A64 Address Space */
+
+#define TSI148_LCSR_ITAT_SUPR (1<<3) /* Supervisor Access */
+#define TSI148_LCSR_ITAT_NPRIV (1<<2) /* Non-Priv (User) Access */
+#define TSI148_LCSR_ITAT_PGM (1<<1) /* Program Access */
+#define TSI148_LCSR_ITAT_DATA (1<<0) /* Data Access */
+
+/*
+ * GCSR Base Address Lower Address CRG +$404
+ */
+#define TSI148_LCSR_GBAL_M (0x7FFFFFF<<5) /* Mask */
+
+/*
+ * GCSR Attribute Register CRG + $408
+ */
+#define TSI148_LCSR_GCSRAT_EN (1<<7) /* Enable access to GCSR */
+
+#define TSI148_LCSR_GCSRAT_AS_M (7<<4) /* Address Space Mask */
+#define TSI148_LCSR_GCSRAT_AS_A16 (0<<4) /* Address Space 16 */
+#define TSI148_LCSR_GCSRAT_AS_A24 (1<<4) /* Address Space 24 */
+#define TSI148_LCSR_GCSRAT_AS_A32 (2<<4) /* Address Space 32 */
+#define TSI148_LCSR_GCSRAT_AS_A64 (4<<4) /* Address Space 64 */
+
+#define TSI148_LCSR_GCSRAT_SUPR (1<<3) /* Sup set -GCSR decoder */
+#define TSI148_LCSR_GCSRAT_NPRIV (1<<2) /* Non-Privileged set - GCSR */
+#define TSI148_LCSR_GCSRAT_PGM (1<<1) /* Program set - GCSR decoder */
+#define TSI148_LCSR_GCSRAT_DATA (1<<0) /* DATA set GCSR decoder */
+
+/*
+ * CRG Base Address Lower Address CRG + $410
+ */
+#define TSI148_LCSR_CBAL_M (0xFFFFF<<12)
+
+/*
+ * CRG Attribute Register CRG + $414
+ */
+#define TSI148_LCSR_CRGAT_EN (1<<7) /* Enable CRG Access */
+
+#define TSI148_LCSR_CRGAT_AS_M (7<<4) /* Address Space */
+#define TSI148_LCSR_CRGAT_AS_A16 (0<<4) /* Address Space 16 */
+#define TSI148_LCSR_CRGAT_AS_A24 (1<<4) /* Address Space 24 */
+#define TSI148_LCSR_CRGAT_AS_A32 (2<<4) /* Address Space 32 */
+#define TSI148_LCSR_CRGAT_AS_A64 (4<<4) /* Address Space 64 */
+
+#define TSI148_LCSR_CRGAT_SUPR (1<<3) /* Supervisor Access */
+#define TSI148_LCSR_CRGAT_NPRIV (1<<2) /* Non-Privileged (User) Access */
+#define TSI148_LCSR_CRGAT_PGM (1<<1) /* Program Access */
+#define TSI148_LCSR_CRGAT_DATA (1<<0) /* Data Access */
+
+/*
+ * CR/CSR Offset Lower Register CRG + $41C
+ */
+#define TSI148_LCSR_CROL_M (0x1FFF<<19) /* Mask */
+
+/*
+ * CR/CSR Attribute register CRG + $420
+ */
+#define TSI148_LCSR_CRAT_EN (1<<7) /* Enable access to CR/CSR */
+
+/*
+ * Location Monitor base address lower register CRG + $428
+ */
+#define TSI148_LCSR_LMBAL_M (0x7FFFFFF<<5) /* Mask */
+
+/*
+ * Location Monitor Attribute Register CRG + $42C
+ */
+#define TSI148_LCSR_LMAT_EN (1<<7) /* Enable Location Monitor */
+
+#define TSI148_LCSR_LMAT_AS_M (7<<4) /* Address Space MASK */
+#define TSI148_LCSR_LMAT_AS_A16 (0<<4) /* A16 */
+#define TSI148_LCSR_LMAT_AS_A24 (1<<4) /* A24 */
+#define TSI148_LCSR_LMAT_AS_A32 (2<<4) /* A32 */
+#define TSI148_LCSR_LMAT_AS_A64 (4<<4) /* A64 */
+
+#define TSI148_LCSR_LMAT_SUPR (1<<3) /* Supervisor Access */
+#define TSI148_LCSR_LMAT_NPRIV (1<<2) /* Non-Priv (User) Access */
+#define TSI148_LCSR_LMAT_PGM (1<<1) /* Program Access */
+#define TSI148_LCSR_LMAT_DATA (1<<0) /* Data Access */
+
+/*
+ * Broadcast Pulse Generator Timer Register CRG + $438
+ */
+#define TSI148_LCSR_BPGTR_BPGT_M (0xFFFF<<0) /* Mask */
+
+/*
+ * Broadcast Programmable Clock Timer Register CRG + $43C
+ */
+#define TSI148_LCSR_BPCTR_BPCT_M (0xFFFFFF<<0) /* Mask */
+
+/*
+ * VMEbus Interrupt Control Register CRG + $440
+ */
+#define TSI148_LCSR_VICR_CNTS_M (3<<22) /* Cntr Source MASK */
+#define TSI148_LCSR_VICR_CNTS_DIS (1<<22) /* Cntr Disable */
+#define TSI148_LCSR_VICR_CNTS_IRQ1 (2<<22) /* IRQ1 to Cntr */
+#define TSI148_LCSR_VICR_CNTS_IRQ2 (3<<22) /* IRQ2 to Cntr */
+
+#define TSI148_LCSR_VICR_EDGIS_M (3<<20) /* Edge interrupt MASK */
+#define TSI148_LCSR_VICR_EDGIS_DIS (1<<20) /* Edge interrupt Disable */
+#define TSI148_LCSR_VICR_EDGIS_IRQ1 (2<<20) /* IRQ1 to Edge */
+#define TSI148_LCSR_VICR_EDGIS_IRQ2 (3<<20) /* IRQ2 to Edge */
+
+#define TSI148_LCSR_VICR_IRQIF_M (3<<18) /* IRQ1* Function MASK */
+#define TSI148_LCSR_VICR_IRQIF_NORM (1<<18) /* Normal */
+#define TSI148_LCSR_VICR_IRQIF_PULSE (2<<18) /* Pulse Generator */
+#define TSI148_LCSR_VICR_IRQIF_PROG (3<<18) /* Programmable Clock */
+#define TSI148_LCSR_VICR_IRQIF_1U (4<<18) /* 1us Clock */
+
+#define TSI148_LCSR_VICR_IRQ2F_M (3<<16) /* IRQ2* Function MASK */
+#define TSI148_LCSR_VICR_IRQ2F_NORM (1<<16) /* Normal */
+#define TSI148_LCSR_VICR_IRQ2F_PULSE (2<<16) /* Pulse Generator */
+#define TSI148_LCSR_VICR_IRQ2F_PROG (3<<16) /* Programmable Clock */
+#define TSI148_LCSR_VICR_IRQ2F_1U (4<<16) /* 1us Clock */
+
+#define TSI148_LCSR_VICR_BIP (1<<15) /* Broadcast Interrupt Pulse */
+
+#define TSI148_LCSR_VICR_IRQC (1<<12) /* VMEbus IRQ Clear */
+#define TSI148_LCSR_VICR_IRQS (1<<11) /* VMEbus IRQ Status */
+
+#define TSI148_LCSR_VICR_IRQL_M (7<<8) /* VMEbus SW IRQ Level Mask */
+#define TSI148_LCSR_VICR_IRQL_1 (1<<8) /* VMEbus SW IRQ Level 1 */
+#define TSI148_LCSR_VICR_IRQL_2 (2<<8) /* VMEbus SW IRQ Level 2 */
+#define TSI148_LCSR_VICR_IRQL_3 (3<<8) /* VMEbus SW IRQ Level 3 */
+#define TSI148_LCSR_VICR_IRQL_4 (4<<8) /* VMEbus SW IRQ Level 4 */
+#define TSI148_LCSR_VICR_IRQL_5 (5<<8) /* VMEbus SW IRQ Level 5 */
+#define TSI148_LCSR_VICR_IRQL_6 (6<<8) /* VMEbus SW IRQ Level 6 */
+#define TSI148_LCSR_VICR_IRQL_7 (7<<8) /* VMEbus SW IRQ Level 7 */
+
+static const int TSI148_LCSR_VICR_IRQL[8] = { 0, TSI148_LCSR_VICR_IRQL_1,
+ TSI148_LCSR_VICR_IRQL_2, TSI148_LCSR_VICR_IRQL_3,
+ TSI148_LCSR_VICR_IRQL_4, TSI148_LCSR_VICR_IRQL_5,
+ TSI148_LCSR_VICR_IRQL_6, TSI148_LCSR_VICR_IRQL_7 };
+
+#define TSI148_LCSR_VICR_STID_M (0xFF<<0) /* Status/ID Mask */
+
+/*
+ * Interrupt Enable Register CRG + $448
+ */
+#define TSI148_LCSR_INTEN_DMA1EN (1<<25) /* DMAC 1 */
+#define TSI148_LCSR_INTEN_DMA0EN (1<<24) /* DMAC 0 */
+#define TSI148_LCSR_INTEN_LM3EN (1<<23) /* Location Monitor 3 */
+#define TSI148_LCSR_INTEN_LM2EN (1<<22) /* Location Monitor 2 */
+#define TSI148_LCSR_INTEN_LM1EN (1<<21) /* Location Monitor 1 */
+#define TSI148_LCSR_INTEN_LM0EN (1<<20) /* Location Monitor 0 */
+#define TSI148_LCSR_INTEN_MB3EN (1<<19) /* Mail Box 3 */
+#define TSI148_LCSR_INTEN_MB2EN (1<<18) /* Mail Box 2 */
+#define TSI148_LCSR_INTEN_MB1EN (1<<17) /* Mail Box 1 */
+#define TSI148_LCSR_INTEN_MB0EN (1<<16) /* Mail Box 0 */
+#define TSI148_LCSR_INTEN_PERREN (1<<13) /* PCI/X Error */
+#define TSI148_LCSR_INTEN_VERREN (1<<12) /* VMEbus Error */
+#define TSI148_LCSR_INTEN_VIEEN (1<<11) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTEN_IACKEN (1<<10) /* IACK */
+#define TSI148_LCSR_INTEN_SYSFLEN (1<<9) /* System Fail */
+#define TSI148_LCSR_INTEN_ACFLEN (1<<8) /* AC Fail */
+#define TSI148_LCSR_INTEN_IRQ7EN (1<<7) /* IRQ7 */
+#define TSI148_LCSR_INTEN_IRQ6EN (1<<6) /* IRQ6 */
+#define TSI148_LCSR_INTEN_IRQ5EN (1<<5) /* IRQ5 */
+#define TSI148_LCSR_INTEN_IRQ4EN (1<<4) /* IRQ4 */
+#define TSI148_LCSR_INTEN_IRQ3EN (1<<3) /* IRQ3 */
+#define TSI148_LCSR_INTEN_IRQ2EN (1<<2) /* IRQ2 */
+#define TSI148_LCSR_INTEN_IRQ1EN (1<<1) /* IRQ1 */
+
+static const int TSI148_LCSR_INTEN_LMEN[4] = { TSI148_LCSR_INTEN_LM0EN,
+ TSI148_LCSR_INTEN_LM1EN,
+ TSI148_LCSR_INTEN_LM2EN,
+ TSI148_LCSR_INTEN_LM3EN };
+
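+/* TSI148_LCSR_INTEN_IRQEN is indexed by (VME interrupt level - 1). */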
+static const int TSI148_LCSR_INTEN_IRQEN[7] = { TSI148_LCSR_INTEN_IRQ1EN,
+ TSI148_LCSR_INTEN_IRQ2EN,
+ TSI148_LCSR_INTEN_IRQ3EN,
+ TSI148_LCSR_INTEN_IRQ4EN,
+ TSI148_LCSR_INTEN_IRQ5EN,
+ TSI148_LCSR_INTEN_IRQ6EN,
+ TSI148_LCSR_INTEN_IRQ7EN };
+
+/*
+ * Interrupt Enable Out Register CRG + $44C
+ */
+#define TSI148_LCSR_INTEO_DMA1EO (1<<25) /* DMAC 1 */
+#define TSI148_LCSR_INTEO_DMA0EO (1<<24) /* DMAC 0 */
+#define TSI148_LCSR_INTEO_LM3EO (1<<23) /* Loc Monitor 3 */
+#define TSI148_LCSR_INTEO_LM2EO (1<<22) /* Loc Monitor 2 */
+#define TSI148_LCSR_INTEO_LM1EO (1<<21) /* Loc Monitor 1 */
+#define TSI148_LCSR_INTEO_LM0EO (1<<20) /* Location Monitor 0 */
+#define TSI148_LCSR_INTEO_MB3EO (1<<19) /* Mail Box 3 */
+#define TSI148_LCSR_INTEO_MB2EO (1<<18) /* Mail Box 2 */
+#define TSI148_LCSR_INTEO_MB1EO (1<<17) /* Mail Box 1 */
+#define TSI148_LCSR_INTEO_MB0EO (1<<16) /* Mail Box 0 */
+#define TSI148_LCSR_INTEO_PERREO (1<<13) /* PCI/X Error */
+#define TSI148_LCSR_INTEO_VERREO (1<<12) /* VMEbus Error */
+#define TSI148_LCSR_INTEO_VIEEO (1<<11) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTEO_IACKEO (1<<10) /* IACK */
+#define TSI148_LCSR_INTEO_SYSFLEO (1<<9) /* System Fail */
+#define TSI148_LCSR_INTEO_ACFLEO (1<<8) /* AC Fail */
+#define TSI148_LCSR_INTEO_IRQ7EO (1<<7) /* IRQ7 */
+#define TSI148_LCSR_INTEO_IRQ6EO (1<<6) /* IRQ6 */
+#define TSI148_LCSR_INTEO_IRQ5EO (1<<5) /* IRQ5 */
+#define TSI148_LCSR_INTEO_IRQ4EO (1<<4) /* IRQ4 */
+#define TSI148_LCSR_INTEO_IRQ3EO (1<<3) /* IRQ3 */
+#define TSI148_LCSR_INTEO_IRQ2EO (1<<2) /* IRQ2 */
+#define TSI148_LCSR_INTEO_IRQ1EO (1<<1) /* IRQ1 */
+
+static const int TSI148_LCSR_INTEO_LMEO[4] = { TSI148_LCSR_INTEO_LM0EO,
+ TSI148_LCSR_INTEO_LM1EO,
+ TSI148_LCSR_INTEO_LM2EO,
+ TSI148_LCSR_INTEO_LM3EO };
+
+static const int TSI148_LCSR_INTEO_IRQEO[7] = { TSI148_LCSR_INTEO_IRQ1EO,
+ TSI148_LCSR_INTEO_IRQ2EO,
+ TSI148_LCSR_INTEO_IRQ3EO,
+ TSI148_LCSR_INTEO_IRQ4EO,
+ TSI148_LCSR_INTEO_IRQ5EO,
+ TSI148_LCSR_INTEO_IRQ6EO,
+ TSI148_LCSR_INTEO_IRQ7EO };
+
+/*
+ * Interrupt Status Register CRG + $450
+ */
+#define TSI148_LCSR_INTS_DMA1S (1<<25) /* DMA 1 */
+#define TSI148_LCSR_INTS_DMA0S (1<<24) /* DMA 0 */
+#define TSI148_LCSR_INTS_LM3S (1<<23) /* Location Monitor 3 */
+#define TSI148_LCSR_INTS_LM2S (1<<22) /* Location Monitor 2 */
+#define TSI148_LCSR_INTS_LM1S (1<<21) /* Location Monitor 1 */
+#define TSI148_LCSR_INTS_LM0S (1<<20) /* Location Monitor 0 */
+#define TSI148_LCSR_INTS_MB3S (1<<19) /* Mail Box 3 */
+#define TSI148_LCSR_INTS_MB2S (1<<18) /* Mail Box 2 */
+#define TSI148_LCSR_INTS_MB1S (1<<17) /* Mail Box 1 */
+#define TSI148_LCSR_INTS_MB0S (1<<16) /* Mail Box 0 */
+#define TSI148_LCSR_INTS_PERRS (1<<13) /* PCI/X Error */
+#define TSI148_LCSR_INTS_VERRS (1<<12) /* VMEbus Error */
+#define TSI148_LCSR_INTS_VIES (1<<11) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTS_IACKS (1<<10) /* IACK */
+#define TSI148_LCSR_INTS_SYSFLS (1<<9) /* System Fail */
+#define TSI148_LCSR_INTS_ACFLS (1<<8) /* AC Fail */
+#define TSI148_LCSR_INTS_IRQ7S (1<<7) /* IRQ7 */
+#define TSI148_LCSR_INTS_IRQ6S (1<<6) /* IRQ6 */
+#define TSI148_LCSR_INTS_IRQ5S (1<<5) /* IRQ5 */
+#define TSI148_LCSR_INTS_IRQ4S (1<<4) /* IRQ4 */
+#define TSI148_LCSR_INTS_IRQ3S (1<<3) /* IRQ3 */
+#define TSI148_LCSR_INTS_IRQ2S (1<<2) /* IRQ2 */
+#define TSI148_LCSR_INTS_IRQ1S (1<<1) /* IRQ1 */
+
+static const int TSI148_LCSR_INTS_LMS[4] = { TSI148_LCSR_INTS_LM0S,
+ TSI148_LCSR_INTS_LM1S,
+ TSI148_LCSR_INTS_LM2S,
+ TSI148_LCSR_INTS_LM3S };
+
+static const int TSI148_LCSR_INTS_MBS[4] = { TSI148_LCSR_INTS_MB0S,
+ TSI148_LCSR_INTS_MB1S,
+ TSI148_LCSR_INTS_MB2S,
+ TSI148_LCSR_INTS_MB3S };
+
+/*
+ * Interrupt Clear Register CRG + $454
+ */
+#define TSI148_LCSR_INTC_DMA1C (1<<25) /* DMA 1 */
+#define TSI148_LCSR_INTC_DMA0C (1<<24) /* DMA 0 */
+#define TSI148_LCSR_INTC_LM3C (1<<23) /* Location Monitor 3 */
+#define TSI148_LCSR_INTC_LM2C (1<<22) /* Location Monitor 2 */
+#define TSI148_LCSR_INTC_LM1C (1<<21) /* Location Monitor 1 */
+#define TSI148_LCSR_INTC_LM0C (1<<20) /* Location Monitor 0 */
+#define TSI148_LCSR_INTC_MB3C (1<<19) /* Mail Box 3 */
+#define TSI148_LCSR_INTC_MB2C (1<<18) /* Mail Box 2 */
+#define TSI148_LCSR_INTC_MB1C (1<<17) /* Mail Box 1 */
+#define TSI148_LCSR_INTC_MB0C (1<<16) /* Mail Box 0 */
+#define TSI148_LCSR_INTC_PERRC (1<<13) /* PCI/X Error */
+#define TSI148_LCSR_INTC_VERRC (1<<12) /* VMEbus Error */
+#define TSI148_LCSR_INTC_VIEC (1<<11) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTC_IACKC (1<<10) /* IACK */
+#define TSI148_LCSR_INTC_SYSFLC (1<<9) /* System Fail */
+#define TSI148_LCSR_INTC_ACFLC (1<<8) /* AC Fail */
+
+static const int TSI148_LCSR_INTC_LMC[4] = { TSI148_LCSR_INTC_LM0C,
+ TSI148_LCSR_INTC_LM1C,
+ TSI148_LCSR_INTC_LM2C,
+ TSI148_LCSR_INTC_LM3C };
+
+static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
+ TSI148_LCSR_INTC_MB1C,
+ TSI148_LCSR_INTC_MB2C,
+ TSI148_LCSR_INTC_MB3C };
+
+/*
+ * Interrupt Map Register 1 CRG + $458
+ */
+#define TSI148_LCSR_INTM1_DMA1M_M (3<<18) /* DMA 1 */
+#define TSI148_LCSR_INTM1_DMA0M_M (3<<16) /* DMA 0 */
+#define TSI148_LCSR_INTM1_LM3M_M (3<<14) /* Location Monitor 3 */
+#define TSI148_LCSR_INTM1_LM2M_M (3<<12) /* Location Monitor 2 */
+#define TSI148_LCSR_INTM1_LM1M_M (3<<10) /* Location Monitor 1 */
+#define TSI148_LCSR_INTM1_LM0M_M (3<<8) /* Location Monitor 0 */
+#define TSI148_LCSR_INTM1_MB3M_M (3<<6) /* Mail Box 3 */
+#define TSI148_LCSR_INTM1_MB2M_M (3<<4) /* Mail Box 2 */
+#define TSI148_LCSR_INTM1_MB1M_M (3<<2) /* Mail Box 1 */
+#define TSI148_LCSR_INTM1_MB0M_M (3<<0) /* Mail Box 0 */
+
+/*
+ * Interrupt Map Register 2 CRG + $45C
+ */
+#define TSI148_LCSR_INTM2_PERRM_M (3<<26) /* PCI Bus Error */
+#define TSI148_LCSR_INTM2_VERRM_M (3<<24) /* VMEbus Error */
+#define TSI148_LCSR_INTM2_VIEM_M (3<<22) /* VMEbus IRQ Edge */
+#define TSI148_LCSR_INTM2_IACKM_M (3<<20) /* IACK */
+#define TSI148_LCSR_INTM2_SYSFLM_M (3<<18) /* System Fail */
+#define TSI148_LCSR_INTM2_ACFLM_M (3<<16) /* AC Fail */
+#define TSI148_LCSR_INTM2_IRQ7M_M (3<<14) /* IRQ7 */
+#define TSI148_LCSR_INTM2_IRQ6M_M (3<<12) /* IRQ6 */
+#define TSI148_LCSR_INTM2_IRQ5M_M (3<<10) /* IRQ5 */
+#define TSI148_LCSR_INTM2_IRQ4M_M (3<<8) /* IRQ4 */
+#define TSI148_LCSR_INTM2_IRQ3M_M (3<<6) /* IRQ3 */
+#define TSI148_LCSR_INTM2_IRQ2M_M (3<<4) /* IRQ2 */
+#define TSI148_LCSR_INTM2_IRQ1M_M (3<<2) /* IRQ1 */
+
+/*
+ * DMA Control (0-1) Registers CRG + $500
+ */
+#define TSI148_LCSR_DCTL_ABT (1<<27) /* Abort */
+#define TSI148_LCSR_DCTL_PAU (1<<26) /* Pause */
+#define TSI148_LCSR_DCTL_DGO (1<<25) /* DMA Go */
+
+#define TSI148_LCSR_DCTL_MOD (1<<23) /* Mode */
+
+#define TSI148_LCSR_DCTL_VBKS_M (7<<12) /* VMEbus block Size MASK */
+#define TSI148_LCSR_DCTL_VBKS_32 (0<<12) /* VMEbus block Size 32 */
+#define TSI148_LCSR_DCTL_VBKS_64 (1<<12) /* VMEbus block Size 64 */
+#define TSI148_LCSR_DCTL_VBKS_128 (2<<12) /* VMEbus block Size 128 */
+#define TSI148_LCSR_DCTL_VBKS_256 (3<<12) /* VMEbus block Size 256 */
+#define TSI148_LCSR_DCTL_VBKS_512 (4<<12) /* VMEbus block Size 512 */
+#define TSI148_LCSR_DCTL_VBKS_1024 (5<<12) /* VMEbus block Size 1024 */
+#define TSI148_LCSR_DCTL_VBKS_2048 (6<<12) /* VMEbus block Size 2048 */
+#define TSI148_LCSR_DCTL_VBKS_4096 (7<<12) /* VMEbus block Size 4096 */
+
+#define TSI148_LCSR_DCTL_VBOT_M (7<<8) /* VMEbus back-off MASK */
+#define TSI148_LCSR_DCTL_VBOT_0 (0<<8) /* VMEbus back-off 0us */
+#define TSI148_LCSR_DCTL_VBOT_1 (1<<8) /* VMEbus back-off 1us */
+#define TSI148_LCSR_DCTL_VBOT_2 (2<<8) /* VMEbus back-off 2us */
+#define TSI148_LCSR_DCTL_VBOT_4 (3<<8) /* VMEbus back-off 4us */
+#define TSI148_LCSR_DCTL_VBOT_8 (4<<8) /* VMEbus back-off 8us */
+#define TSI148_LCSR_DCTL_VBOT_16 (5<<8) /* VMEbus back-off 16us */
+#define TSI148_LCSR_DCTL_VBOT_32 (6<<8) /* VMEbus back-off 32us */
+#define TSI148_LCSR_DCTL_VBOT_64 (7<<8) /* VMEbus back-off 64us */
+
+#define TSI148_LCSR_DCTL_PBKS_M (7<<4) /* PCI block size MASK */
+#define TSI148_LCSR_DCTL_PBKS_32 (0<<4) /* PCI block size 32 bytes */
+#define TSI148_LCSR_DCTL_PBKS_64 (1<<4) /* PCI block size 64 bytes */
+#define TSI148_LCSR_DCTL_PBKS_128 (2<<4) /* PCI block size 128 bytes */
+#define TSI148_LCSR_DCTL_PBKS_256 (3<<4) /* PCI block size 256 bytes */
+#define TSI148_LCSR_DCTL_PBKS_512 (4<<4) /* PCI block size 512 bytes */
+#define TSI148_LCSR_DCTL_PBKS_1024 (5<<4) /* PCI block size 1024 bytes */
+#define TSI148_LCSR_DCTL_PBKS_2048 (6<<4) /* PCI block size 2048 bytes */
+#define TSI148_LCSR_DCTL_PBKS_4096 (7<<4) /* PCI block size 4096 bytes */
+
+#define TSI148_LCSR_DCTL_PBOT_M (7<<0) /* PCI back off MASK */
+#define TSI148_LCSR_DCTL_PBOT_0 (0<<0) /* PCI back off 0us */
+#define TSI148_LCSR_DCTL_PBOT_1 (1<<0) /* PCI back off 1us */
+#define TSI148_LCSR_DCTL_PBOT_2 (2<<0) /* PCI back off 2us */
+#define TSI148_LCSR_DCTL_PBOT_4 (3<<0) /* PCI back off 4us */
+#define TSI148_LCSR_DCTL_PBOT_8 (4<<0) /* PCI back off 8us */
+#define TSI148_LCSR_DCTL_PBOT_16 (5<<0) /* PCI back off 16us */
+#define TSI148_LCSR_DCTL_PBOT_32 (6<<0) /* PCI back off 32us */
+#define TSI148_LCSR_DCTL_PBOT_64 (7<<0) /* PCI back off 64us */
+
+/*
+ * DMA Status Registers (0-1) CRG + $504
+ */
+#define TSI148_LCSR_DSTA_SMA (1<<31) /* PCI Signalled Master Abt */
+#define TSI148_LCSR_DSTA_RTA (1<<30) /* PCI Received Target Abt */
+#define TSI148_LCSR_DSTA_MRC (1<<29) /* PCI Max Retry Count */
+#define TSI148_LCSR_DSTA_VBE (1<<28) /* VMEbus error */
+#define TSI148_LCSR_DSTA_ABT (1<<27) /* Abort */
+#define TSI148_LCSR_DSTA_PAU (1<<26) /* Pause */
+#define TSI148_LCSR_DSTA_DON (1<<25) /* Done */
+#define TSI148_LCSR_DSTA_BSY (1<<24) /* Busy */
+
+/*
+ * DMA Current Link Address Lower (0-1)
+ */
+#define TSI148_LCSR_DCLAL_M (0x3FFFFFF<<6) /* Mask */
+
+/*
+ * DMA Source Attribute (0-1) Reg
+ */
+#define TSI148_LCSR_DSAT_TYP_M (3<<28) /* Source Bus Type */
+#define TSI148_LCSR_DSAT_TYP_PCI (0<<28) /* PCI Bus */
+#define TSI148_LCSR_DSAT_TYP_VME (1<<28) /* VMEbus */
+#define TSI148_LCSR_DSAT_TYP_PAT (2<<28) /* Data Pattern */
+
+#define TSI148_LCSR_DSAT_PSZ (1<<25) /* Pattern Size */
+#define TSI148_LCSR_DSAT_NIN (1<<24) /* No Increment */
+
+#define TSI148_LCSR_DSAT_2eSSTM_M (3<<11) /* 2eSST Trans Rate Mask */
+#define TSI148_LCSR_DSAT_2eSSTM_160 (0<<11) /* 160 MB/s */
+#define TSI148_LCSR_DSAT_2eSSTM_267 (1<<11) /* 267 MB/s */
+#define TSI148_LCSR_DSAT_2eSSTM_320 (2<<11) /* 320 MB/s */
+
+#define TSI148_LCSR_DSAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
+#define TSI148_LCSR_DSAT_TM_SCT (0<<8) /* SCT */
+#define TSI148_LCSR_DSAT_TM_BLT (1<<8) /* BLT */
+#define TSI148_LCSR_DSAT_TM_MBLT (2<<8) /* MBLT */
+#define TSI148_LCSR_DSAT_TM_2eVME (3<<8) /* 2eVME */
+#define TSI148_LCSR_DSAT_TM_2eSST (4<<8) /* 2eSST */
+#define TSI148_LCSR_DSAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
+
+#define TSI148_LCSR_DSAT_DBW_M (3<<6) /* Max Data Width MASK */
+#define TSI148_LCSR_DSAT_DBW_16 (0<<6) /* 16 Bits */
+#define TSI148_LCSR_DSAT_DBW_32 (1<<6) /* 32 Bits */
+
+#define TSI148_LCSR_DSAT_SUP (1<<5) /* Supervisory Mode */
+#define TSI148_LCSR_DSAT_PGM (1<<4) /* Program Mode */
+
+#define TSI148_LCSR_DSAT_AMODE_M (0xf<<0) /* Address Space Mask */
+#define TSI148_LCSR_DSAT_AMODE_A16 (0<<0) /* A16 */
+#define TSI148_LCSR_DSAT_AMODE_A24 (1<<0) /* A24 */
+#define TSI148_LCSR_DSAT_AMODE_A32 (2<<0) /* A32 */
+#define TSI148_LCSR_DSAT_AMODE_A64 (4<<0) /* A64 */
+#define TSI148_LCSR_DSAT_AMODE_CRCSR (5<<0) /* CR/CSR */
+#define TSI148_LCSR_DSAT_AMODE_USER1 (8<<0) /* User1 */
+#define TSI148_LCSR_DSAT_AMODE_USER2 (9<<0) /* User2 */
+#define TSI148_LCSR_DSAT_AMODE_USER3 (0xa<<0) /* User3 */
+#define TSI148_LCSR_DSAT_AMODE_USER4 (0xb<<0) /* User4 */
+
+/*
+ * DMA Destination Attribute Registers (0-1)
+ */
+#define TSI148_LCSR_DDAT_TYP_PCI (0<<28) /* Destination PCI Bus */
+#define TSI148_LCSR_DDAT_TYP_VME (1<<28) /* Destination VMEbus */
+
+#define TSI148_LCSR_DDAT_2eSSTM_M (3<<11) /* 2eSST Transfer Rate Mask */
+#define TSI148_LCSR_DDAT_2eSSTM_160 (0<<11) /* 160 MB/s */
+#define TSI148_LCSR_DDAT_2eSSTM_267 (1<<11) /* 267 MB/s */
+#define TSI148_LCSR_DDAT_2eSSTM_320 (2<<11) /* 320 MB/s */
+
+#define TSI148_LCSR_DDAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
+#define TSI148_LCSR_DDAT_TM_SCT (0<<8) /* SCT */
+#define TSI148_LCSR_DDAT_TM_BLT (1<<8) /* BLT */
+#define TSI148_LCSR_DDAT_TM_MBLT (2<<8) /* MBLT */
+#define TSI148_LCSR_DDAT_TM_2eVME (3<<8) /* 2eVME */
+#define TSI148_LCSR_DDAT_TM_2eSST (4<<8) /* 2eSST */
+#define TSI148_LCSR_DDAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
+
+#define TSI148_LCSR_DDAT_DBW_M (3<<6) /* Max Data Width MASK */
+#define TSI148_LCSR_DDAT_DBW_16 (0<<6) /* 16 Bits */
+#define TSI148_LCSR_DDAT_DBW_32 (1<<6) /* 32 Bits */
+
+#define TSI148_LCSR_DDAT_SUP (1<<5) /* Supervisory/User Access */
+#define TSI148_LCSR_DDAT_PGM (1<<4) /* Program/Data Access */
+
+#define TSI148_LCSR_DDAT_AMODE_M (0xf<<0) /* Address Space Mask */
+#define TSI148_LCSR_DDAT_AMODE_A16 (0<<0) /* A16 */
+#define TSI148_LCSR_DDAT_AMODE_A24 (1<<0) /* A24 */
+#define TSI148_LCSR_DDAT_AMODE_A32 (2<<0) /* A32 */
+#define TSI148_LCSR_DDAT_AMODE_A64 (4<<0) /* A64 */
+#define TSI148_LCSR_DDAT_AMODE_CRCSR (5<<0) /* CR/CSR */
+#define TSI148_LCSR_DDAT_AMODE_USER1 (8<<0) /* User1 */
+#define TSI148_LCSR_DDAT_AMODE_USER2 (9<<0) /* User2 */
+#define TSI148_LCSR_DDAT_AMODE_USER3 (0xa<<0) /* User3 */
+#define TSI148_LCSR_DDAT_AMODE_USER4 (0xb<<0) /* User4 */
+
+/*
+ * DMA Next Link Address Lower
+ */
+#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF<<6) /* Address Mask */
+#define TSI148_LCSR_DNLAL_LLA (1<<0) /* Last Link Address Indicator */
+
+/*
+ * DMA 2eSST Broadcast Select
+ */
+#define TSI148_LCSR_DBS_M (0x1FFFFF<<0) /* Mask */
+
+/*
+ * GCSR Register Group
+ */
+
+/*
+ * GCSR Control and Status Register CRG + $604
+ */
+#define TSI148_GCSR_GCTRL_LRST (1<<15) /* Local Reset */
+#define TSI148_GCSR_GCTRL_SFAILEN (1<<14) /* System Fail enable */
+#define TSI148_GCSR_GCTRL_BDFAILS (1<<13) /* Board Fail Status */
+#define TSI148_GCSR_GCTRL_SCON (1<<12) /* System Controller */
+#define TSI148_GCSR_GCTRL_MEN (1<<11) /* Module Enable (READY) */
+
+#define TSI148_GCSR_GCTRL_LMI3S (1<<7) /* Loc Monitor 3 Int Status */
+#define TSI148_GCSR_GCTRL_LMI2S (1<<6) /* Loc Monitor 2 Int Status */
+#define TSI148_GCSR_GCTRL_LMI1S (1<<5) /* Loc Monitor 1 Int Status */
+#define TSI148_GCSR_GCTRL_LMI0S (1<<4) /* Loc Monitor 0 Int Status */
+#define TSI148_GCSR_GCTRL_MBI3S (1<<3) /* Mail box 3 Int Status */
+#define TSI148_GCSR_GCTRL_MBI2S (1<<2) /* Mail box 2 Int Status */
+#define TSI148_GCSR_GCTRL_MBI1S (1<<1) /* Mail box 1 Int Status */
+#define TSI148_GCSR_GCTRL_MBI0S (1<<0) /* Mail box 0 Int Status */
+
+#define TSI148_GCSR_GAP (1<<5) /* Geographic Addr Parity */
+#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */
+
+/*
+ * CR/CSR Register Group
+ */
+
+/*
+ * CR/CSR Bit Clear Register CRG + $FF4
+ */
+#define TSI148_CRCSR_CSRBCR_LRSTC (1<<7) /* Local Reset Clear */
+#define TSI148_CRCSR_CSRBCR_SFAILC (1<<6) /* System Fail Enable Clear */
+#define TSI148_CRCSR_CSRBCR_BDFAILS (1<<5) /* Board Fail Status */
+#define TSI148_CRCSR_CSRBCR_MENC (1<<4) /* Module Enable Clear */
+#define TSI148_CRCSR_CSRBCR_BERRSC (1<<3) /* Bus Error Status Clear */
+
+/*
+ * CR/CSR Bit Set Register CRG+$FF8
+ */
+#define TSI148_CRCSR_CSRBSR_LISTS (1<<7) /* Local Reset Set */
+#define TSI148_CRCSR_CSRBSR_SFAILS (1<<6) /* System Fail Enable Set */
+#define TSI148_CRCSR_CSRBSR_BDFAILS (1<<5) /* Board Fail Status Set */
+#define TSI148_CRCSR_CSRBSR_MENS (1<<4) /* Module Enable Set */
+#define TSI148_CRCSR_CSRBSR_BERRS (1<<3) /* Bus Error Status Set */
+
+/*
+ * CR/CSR Base Address Register CRG + FFC
+ */
+#define TSI148_CRCSR_CBAR_M (0x1F<<3) /* Mask */
+
+#endif /* TSI148_H */
--- /dev/null
+/*
+ * VME Bridge Framework
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * Based on work by Tom Armistead and Ajit Prem
+ * Copyright 2004 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/syscalls.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/vme.h>
+
+#include "vme_bridge.h"
+
+/* Bitmask and list of registered buses both protected by common mutex */
+static unsigned int vme_bus_numbers;
+static LIST_HEAD(vme_bus_list);
+static DEFINE_MUTEX(vme_buses_lock);
+
+static void __exit vme_exit(void);
+static int __init vme_init(void);
+
+static struct vme_dev *dev_to_vme_dev(struct device *dev)
+{
+ return container_of(dev, struct vme_dev, dev);
+}
+
+/*
+ * Find the bridge that the resource is associated with.
+ */
+static struct vme_bridge *find_bridge(struct vme_resource *resource)
+{
+ /* Get list to search */
+ switch (resource->type) {
+ case VME_MASTER:
+ return list_entry(resource->entry, struct vme_master_resource,
+ list)->parent;
+ case VME_SLAVE:
+ return list_entry(resource->entry, struct vme_slave_resource,
+ list)->parent;
+ case VME_DMA:
+ return list_entry(resource->entry, struct vme_dma_resource,
+ list)->parent;
+ case VME_LM:
+ return list_entry(resource->entry, struct vme_lm_resource,
+ list)->parent;
+ default:
+ printk(KERN_ERR "Unknown resource type\n");
+ return NULL;
+ }
+}
+
+/*
+ * Allocate a contiguous block of memory for use by the driver. This is used to
+ * create the buffers for the slave windows.
+ */
+void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
+ dma_addr_t *dma)
+{
+ struct vme_bridge *bridge;
+
+ if (resource == NULL) {
+ printk(KERN_ERR "No resource\n");
+ return NULL;
+ }
+
+ bridge = find_bridge(resource);
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find bridge\n");
+ return NULL;
+ }
+
+ if (bridge->parent == NULL) {
+ printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
+ return NULL;
+ }
+
+ if (bridge->alloc_consistent == NULL) {
+ printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
+ bridge->name);
+ return NULL;
+ }
+
+ return bridge->alloc_consistent(bridge->parent, size, dma);
+}
+EXPORT_SYMBOL(vme_alloc_consistent);
+
+/*
+ * Free previously allocated contiguous block of memory.
+ */
+void vme_free_consistent(struct vme_resource *resource, size_t size,
+ void *vaddr, dma_addr_t dma)
+{
+ struct vme_bridge *bridge;
+
+ if (resource == NULL) {
+ printk(KERN_ERR "No resource\n");
+ return;
+ }
+
+ bridge = find_bridge(resource);
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find bridge\n");
+ return;
+ }
+
+ if (bridge->parent == NULL) {
+ printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
+ return;
+ }
+
+ if (bridge->free_consistent == NULL) {
+ printk(KERN_ERR "free_consistent not supported by bridge %s\n",
+ bridge->name);
+ return;
+ }
+
+ bridge->free_consistent(bridge->parent, size, vaddr, dma);
+}
+EXPORT_SYMBOL(vme_free_consistent);
+
+size_t vme_get_size(struct vme_resource *resource)
+{
+ int enabled, retval;
+ unsigned long long base, size;
+ dma_addr_t buf_base;
+ u32 aspace, cycle, dwidth;
+
+ switch (resource->type) {
+ case VME_MASTER:
+ retval = vme_master_get(resource, &enabled, &base, &size,
+ &aspace, &cycle, &dwidth);
+
+ return size;
+ case VME_SLAVE:
+ retval = vme_slave_get(resource, &enabled, &base, &size,
+ &buf_base, &aspace, &cycle);
+
+ return size;
+ case VME_DMA:
+ return 0;
+ default:
+ printk(KERN_ERR "Unknown resource type\n");
+ return 0;
+ }
+}
+EXPORT_SYMBOL(vme_get_size);
+
+static int vme_check_window(u32 aspace, unsigned long long vme_base,
+ unsigned long long size)
+{
+ int retval = 0;
+
+ switch (aspace) {
+ case VME_A16:
+ if (((vme_base + size) > VME_A16_MAX) ||
+ (vme_base > VME_A16_MAX))
+ retval = -EFAULT;
+ break;
+ case VME_A24:
+ if (((vme_base + size) > VME_A24_MAX) ||
+ (vme_base > VME_A24_MAX))
+ retval = -EFAULT;
+ break;
+ case VME_A32:
+ if (((vme_base + size) > VME_A32_MAX) ||
+ (vme_base > VME_A32_MAX))
+ retval = -EFAULT;
+ break;
+ case VME_A64:
+ /*
+ * Any value held in an unsigned long long can be used as the
+ * base
+ */
+ break;
+ case VME_CRCSR:
+ if (((vme_base + size) > VME_CRCSR_MAX) ||
+ (vme_base > VME_CRCSR_MAX))
+ retval = -EFAULT;
+ break;
+ case VME_USER1:
+ case VME_USER2:
+ case VME_USER3:
+ case VME_USER4:
+ /* User Defined */
+ break;
+ default:
+ printk(KERN_ERR "Invalid address space\n");
+ retval = -EINVAL;
+ break;
+ }
+
+ return retval;
+}
+
+/*
+ * Request a slave image with specific attributes, return some unique
+ * identifier.
+ */
+struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
+ u32 cycle)
+{
+ struct vme_bridge *bridge;
+ struct list_head *slave_pos = NULL;
+ struct vme_slave_resource *allocated_image = NULL;
+ struct vme_slave_resource *slave_image = NULL;
+ struct vme_resource *resource = NULL;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find VME bus\n");
+ goto err_bus;
+ }
+
+ /* Loop through slave resources */
+ list_for_each(slave_pos, &bridge->slave_resources) {
+ slave_image = list_entry(slave_pos,
+ struct vme_slave_resource, list);
+
+ if (slave_image == NULL) {
+ printk(KERN_ERR "Registered NULL Slave resource\n");
+ continue;
+ }
+
+ /* Find an unlocked and compatible image */
+ mutex_lock(&slave_image->mtx);
+ if (((slave_image->address_attr & address) == address) &&
+ ((slave_image->cycle_attr & cycle) == cycle) &&
+ (slave_image->locked == 0)) {
+
+ slave_image->locked = 1;
+ mutex_unlock(&slave_image->mtx);
+ allocated_image = slave_image;
+ break;
+ }
+ mutex_unlock(&slave_image->mtx);
+ }
+
+ /* No free image */
+ if (allocated_image == NULL)
+ goto err_image;
+
+ resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
+ if (resource == NULL) {
+ printk(KERN_WARNING "Unable to allocate resource structure\n");
+ goto err_alloc;
+ }
+ resource->type = VME_SLAVE;
+ resource->entry = &allocated_image->list;
+
+ return resource;
+
+err_alloc:
+ /* Unlock image */
+ mutex_lock(&slave_image->mtx);
+ slave_image->locked = 0;
+ mutex_unlock(&slave_image->mtx);
+err_image:
+err_bus:
+ return NULL;
+}
+EXPORT_SYMBOL(vme_slave_request);
+
+int vme_slave_set(struct vme_resource *resource, int enabled,
+ unsigned long long vme_base, unsigned long long size,
+ dma_addr_t buf_base, u32 aspace, u32 cycle)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_slave_resource *image;
+ int retval;
+
+ if (resource->type != VME_SLAVE) {
+ printk(KERN_ERR "Not a slave resource\n");
+ return -EINVAL;
+ }
+
+ image = list_entry(resource->entry, struct vme_slave_resource, list);
+
+ if (bridge->slave_set == NULL) {
+ printk(KERN_ERR "Function not supported\n");
+ return -ENOSYS;
+ }
+
+ if (!(((image->address_attr & aspace) == aspace) &&
+ ((image->cycle_attr & cycle) == cycle))) {
+ printk(KERN_ERR "Invalid attributes\n");
+ return -EINVAL;
+ }
+
+ retval = vme_check_window(aspace, vme_base, size);
+ if (retval)
+ return retval;
+
+ return bridge->slave_set(image, enabled, vme_base, size, buf_base,
+ aspace, cycle);
+}
+EXPORT_SYMBOL(vme_slave_set);
+
+int vme_slave_get(struct vme_resource *resource, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size,
+ dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_slave_resource *image;
+
+ if (resource->type != VME_SLAVE) {
+ printk(KERN_ERR "Not a slave resource\n");
+ return -EINVAL;
+ }
+
+ image = list_entry(resource->entry, struct vme_slave_resource, list);
+
+ if (bridge->slave_get == NULL) {
+ printk(KERN_ERR "vme_slave_get not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->slave_get(image, enabled, vme_base, size, buf_base,
+ aspace, cycle);
+}
+EXPORT_SYMBOL(vme_slave_get);
+
+void vme_slave_free(struct vme_resource *resource)
+{
+ struct vme_slave_resource *slave_image;
+
+ if (resource->type != VME_SLAVE) {
+ printk(KERN_ERR "Not a slave resource\n");
+ return;
+ }
+
+ slave_image = list_entry(resource->entry, struct vme_slave_resource,
+ list);
+ if (slave_image == NULL) {
+ printk(KERN_ERR "Can't find slave resource\n");
+ return;
+ }
+
+ /* Unlock image */
+ mutex_lock(&slave_image->mtx);
+ if (slave_image->locked == 0)
+ printk(KERN_ERR "Image is already free\n");
+
+ slave_image->locked = 0;
+ mutex_unlock(&slave_image->mtx);
+
+ /* Free up resource memory */
+ kfree(resource);
+}
+EXPORT_SYMBOL(vme_slave_free);
+
+/*
+ * Request a master image with specific attributes, return some unique
+ * identifier.
+ */
+struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
+ u32 cycle, u32 dwidth)
+{
+ struct vme_bridge *bridge;
+ struct list_head *master_pos = NULL;
+ struct vme_master_resource *allocated_image = NULL;
+ struct vme_master_resource *master_image = NULL;
+ struct vme_resource *resource = NULL;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find VME bus\n");
+ goto err_bus;
+ }
+
+ /* Loop through master resources */
+ list_for_each(master_pos, &bridge->master_resources) {
+ master_image = list_entry(master_pos,
+ struct vme_master_resource, list);
+
+ if (master_image == NULL) {
+ printk(KERN_WARNING "Registered NULL master resource\n");
+ continue;
+ }
+
+ /* Find an unlocked and compatible image */
+ spin_lock(&master_image->lock);
+ if (((master_image->address_attr & address) == address) &&
+ ((master_image->cycle_attr & cycle) == cycle) &&
+ ((master_image->width_attr & dwidth) == dwidth) &&
+ (master_image->locked == 0)) {
+
+ master_image->locked = 1;
+ spin_unlock(&master_image->lock);
+ allocated_image = master_image;
+ break;
+ }
+ spin_unlock(&master_image->lock);
+ }
+
+ /* Check to see if we found a resource */
+ if (allocated_image == NULL) {
+ printk(KERN_ERR "Can't find a suitable resource\n");
+ goto err_image;
+ }
+
+ resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
+ if (resource == NULL) {
+ printk(KERN_ERR "Unable to allocate resource structure\n");
+ goto err_alloc;
+ }
+ resource->type = VME_MASTER;
+ resource->entry = &allocated_image->list;
+
+ return resource;
+
+err_alloc:
+ /* Unlock image */
+ spin_lock(&master_image->lock);
+ master_image->locked = 0;
+ spin_unlock(&master_image->lock);
+err_image:
+err_bus:
+ return NULL;
+}
+EXPORT_SYMBOL(vme_master_request);
+
+int vme_master_set(struct vme_resource *resource, int enabled,
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_master_resource *image;
+ int retval;
+
+ if (resource->type != VME_MASTER) {
+ printk(KERN_ERR "Not a master resource\n");
+ return -EINVAL;
+ }
+
+ image = list_entry(resource->entry, struct vme_master_resource, list);
+
+ if (bridge->master_set == NULL) {
+ printk(KERN_WARNING "vme_master_set not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(((image->address_attr & aspace) == aspace) &&
+ ((image->cycle_attr & cycle) == cycle) &&
+ ((image->width_attr & dwidth) == dwidth))) {
+ printk(KERN_WARNING "Invalid attributes\n");
+ return -EINVAL;
+ }
+
+ retval = vme_check_window(aspace, vme_base, size);
+ if (retval)
+ return retval;
+
+ return bridge->master_set(image, enabled, vme_base, size, aspace,
+ cycle, dwidth);
+}
+EXPORT_SYMBOL(vme_master_set);
+
+int vme_master_get(struct vme_resource *resource, int *enabled,
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_master_resource *image;
+
+ if (resource->type != VME_MASTER) {
+ printk(KERN_ERR "Not a master resource\n");
+ return -EINVAL;
+ }
+
+ image = list_entry(resource->entry, struct vme_master_resource, list);
+
+ if (bridge->master_get == NULL) {
+ printk(KERN_WARNING "vme_master_set not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->master_get(image, enabled, vme_base, size, aspace,
+ cycle, dwidth);
+}
+EXPORT_SYMBOL(vme_master_get);
+
+/*
+ * Read data out of VME space into a buffer.
+ */
+ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
+ loff_t offset)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_master_resource *image;
+ size_t length;
+
+ if (bridge->master_read == NULL) {
+ printk(KERN_WARNING "Reading from resource not supported\n");
+ return -EINVAL;
+ }
+
+ if (resource->type != VME_MASTER) {
+ printk(KERN_ERR "Not a master resource\n");
+ return -EINVAL;
+ }
+
+ image = list_entry(resource->entry, struct vme_master_resource, list);
+
+ length = vme_get_size(resource);
+
+ if (offset > length) {
+ printk(KERN_WARNING "Invalid Offset\n");
+ return -EFAULT;
+ }
+
+ if ((offset + count) > length)
+ count = length - offset;
+
+ return bridge->master_read(image, buf, count, offset);
+
+}
+EXPORT_SYMBOL(vme_master_read);
+
+/*
+ * Write data out to VME space from a buffer.
+ */
+ssize_t vme_master_write(struct vme_resource *resource, void *buf,
+ size_t count, loff_t offset)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_master_resource *image;
+ size_t length;
+
+ if (bridge->master_write == NULL) {
+ printk(KERN_WARNING "Writing to resource not supported\n");
+ return -EINVAL;
+ }
+
+ if (resource->type != VME_MASTER) {
+ printk(KERN_ERR "Not a master resource\n");
+ return -EINVAL;
+ }
+
+ image = list_entry(resource->entry, struct vme_master_resource, list);
+
+ length = vme_get_size(resource);
+
+ if (offset > length) {
+ printk(KERN_WARNING "Invalid Offset\n");
+ return -EFAULT;
+ }
+
+ if ((offset + count) > length)
+ count = length - offset;
+
+ return bridge->master_write(image, buf, count, offset);
+}
+EXPORT_SYMBOL(vme_master_write);
+
+/*
+ * Perform RMW cycle to provided location.
+ */
+unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
+ unsigned int compare, unsigned int swap, loff_t offset)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_master_resource *image;
+
+ if (bridge->master_rmw == NULL) {
+ printk(KERN_WARNING "Writing to resource not supported\n");
+ return -EINVAL;
+ }
+
+ if (resource->type != VME_MASTER) {
+ printk(KERN_ERR "Not a master resource\n");
+ return -EINVAL;
+ }
+
+ image = list_entry(resource->entry, struct vme_master_resource, list);
+
+ return bridge->master_rmw(image, mask, compare, swap, offset);
+}
+EXPORT_SYMBOL(vme_master_rmw);
+
+void vme_master_free(struct vme_resource *resource)
+{
+ struct vme_master_resource *master_image;
+
+ if (resource->type != VME_MASTER) {
+ printk(KERN_ERR "Not a master resource\n");
+ return;
+ }
+
+ master_image = list_entry(resource->entry, struct vme_master_resource,
+ list);
+ if (master_image == NULL) {
+ printk(KERN_ERR "Can't find master resource\n");
+ return;
+ }
+
+ /* Unlock image */
+ spin_lock(&master_image->lock);
+ if (master_image->locked == 0)
+ printk(KERN_ERR "Image is already free\n");
+
+ master_image->locked = 0;
+ spin_unlock(&master_image->lock);
+
+ /* Free up resource memory */
+ kfree(resource);
+}
+EXPORT_SYMBOL(vme_master_free);
+
+/*
+ * Request a DMA controller with specific attributes, return some unique
+ * identifier.
+ */
+struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
+{
+ struct vme_bridge *bridge;
+ struct list_head *dma_pos = NULL;
+ struct vme_dma_resource *allocated_ctrlr = NULL;
+ struct vme_dma_resource *dma_ctrlr = NULL;
+ struct vme_resource *resource = NULL;
+
+ /* XXX Not checking resource attributes */
+ printk(KERN_ERR "No VME resource Attribute tests done\n");
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find VME bus\n");
+ goto err_bus;
+ }
+
+ /* Loop through DMA resources */
+ list_for_each(dma_pos, &bridge->dma_resources) {
+ dma_ctrlr = list_entry(dma_pos,
+ struct vme_dma_resource, list);
+
+ if (dma_ctrlr == NULL) {
+ printk(KERN_ERR "Registered NULL DMA resource\n");
+ continue;
+ }
+
+ /* Find an unlocked and compatible controller */
+ mutex_lock(&dma_ctrlr->mtx);
+ if (((dma_ctrlr->route_attr & route) == route) &&
+ (dma_ctrlr->locked == 0)) {
+
+ dma_ctrlr->locked = 1;
+ mutex_unlock(&dma_ctrlr->mtx);
+ allocated_ctrlr = dma_ctrlr;
+ break;
+ }
+ mutex_unlock(&dma_ctrlr->mtx);
+ }
+
+ /* Check to see if we found a resource */
+ if (allocated_ctrlr == NULL)
+ goto err_ctrlr;
+
+ resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
+ if (resource == NULL) {
+ printk(KERN_WARNING "Unable to allocate resource structure\n");
+ goto err_alloc;
+ }
+ resource->type = VME_DMA;
+ resource->entry = &allocated_ctrlr->list;
+
+ return resource;
+
+err_alloc:
+ /* Unlock image */
+ mutex_lock(&dma_ctrlr->mtx);
+ dma_ctrlr->locked = 0;
+ mutex_unlock(&dma_ctrlr->mtx);
+err_ctrlr:
+err_bus:
+ return NULL;
+}
+EXPORT_SYMBOL(vme_dma_request);
+
+/*
+ * Start new list
+ */
+struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
+{
+ struct vme_dma_resource *ctrlr;
+ struct vme_dma_list *dma_list;
+
+ if (resource->type != VME_DMA) {
+ printk(KERN_ERR "Not a DMA resource\n");
+ return NULL;
+ }
+
+ ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
+
+ dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
+ if (dma_list == NULL) {
+ printk(KERN_ERR "Unable to allocate memory for new dma list\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&dma_list->entries);
+ dma_list->parent = ctrlr;
+ mutex_init(&dma_list->mtx);
+
+ return dma_list;
+}
+EXPORT_SYMBOL(vme_new_dma_list);
+
+/*
+ * Create "Pattern" type attributes
+ */
+struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
+{
+ struct vme_dma_attr *attributes;
+ struct vme_dma_pattern *pattern_attr;
+
+ attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
+ if (attributes == NULL) {
+ printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ goto err_attr;
+ }
+
+ pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
+ if (pattern_attr == NULL) {
+ printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
+ goto err_pat;
+ }
+
+ attributes->type = VME_DMA_PATTERN;
+ attributes->private = (void *)pattern_attr;
+
+ pattern_attr->pattern = pattern;
+ pattern_attr->type = type;
+
+ return attributes;
+
+err_pat:
+ kfree(attributes);
+err_attr:
+ return NULL;
+}
+EXPORT_SYMBOL(vme_dma_pattern_attribute);
+
+/*
+ * Create "PCI" type attributes
+ */
+struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
+{
+ struct vme_dma_attr *attributes;
+ struct vme_dma_pci *pci_attr;
+
+ /* XXX Run some sanity checks here */
+
+ attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
+ if (attributes == NULL) {
+ printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ goto err_attr;
+ }
+
+ pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
+ if (pci_attr == NULL) {
+ printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
+ goto err_pci;
+ }
+
+ attributes->type = VME_DMA_PCI;
+ attributes->private = (void *)pci_attr;
+
+ pci_attr->address = address;
+
+ return attributes;
+
+err_pci:
+ kfree(attributes);
+err_attr:
+ return NULL;
+}
+EXPORT_SYMBOL(vme_dma_pci_attribute);
+
+/*
+ * Create "VME" type attributes
+ */
+struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
+ u32 aspace, u32 cycle, u32 dwidth)
+{
+ struct vme_dma_attr *attributes;
+ struct vme_dma_vme *vme_attr;
+
+ attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
+ if (attributes == NULL) {
+ printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ goto err_attr;
+ }
+
+ vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
+ if (vme_attr == NULL) {
+ printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
+ goto err_vme;
+ }
+
+ attributes->type = VME_DMA_VME;
+ attributes->private = (void *)vme_attr;
+
+ vme_attr->address = address;
+ vme_attr->aspace = aspace;
+ vme_attr->cycle = cycle;
+ vme_attr->dwidth = dwidth;
+
+ return attributes;
+
+err_vme:
+ kfree(attributes);
+err_attr:
+ return NULL;
+}
+EXPORT_SYMBOL(vme_dma_vme_attribute);
+
+/*
+ * Free attribute
+ */
+void vme_dma_free_attribute(struct vme_dma_attr *attributes)
+{
+ kfree(attributes->private);
+ kfree(attributes);
+}
+EXPORT_SYMBOL(vme_dma_free_attribute);
+
+int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
+ struct vme_dma_attr *dest, size_t count)
+{
+ struct vme_bridge *bridge = list->parent->parent;
+ int retval;
+
+ if (bridge->dma_list_add == NULL) {
+ printk(KERN_WARNING "Link List DMA generation not supported\n");
+ return -EINVAL;
+ }
+
+ if (!mutex_trylock(&list->mtx)) {
+ printk(KERN_ERR "Link List already submitted\n");
+ return -EINVAL;
+ }
+
+ retval = bridge->dma_list_add(list, src, dest, count);
+
+ mutex_unlock(&list->mtx);
+
+ return retval;
+}
+EXPORT_SYMBOL(vme_dma_list_add);
+
+int vme_dma_list_exec(struct vme_dma_list *list)
+{
+ struct vme_bridge *bridge = list->parent->parent;
+ int retval;
+
+ if (bridge->dma_list_exec == NULL) {
+ printk(KERN_ERR "Link List DMA execution not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&list->mtx);
+
+ retval = bridge->dma_list_exec(list);
+
+ mutex_unlock(&list->mtx);
+
+ return retval;
+}
+EXPORT_SYMBOL(vme_dma_list_exec);
+
+int vme_dma_list_free(struct vme_dma_list *list)
+{
+ struct vme_bridge *bridge = list->parent->parent;
+ int retval;
+
+ if (bridge->dma_list_empty == NULL) {
+ printk(KERN_WARNING "Emptying of Link Lists not supported\n");
+ return -EINVAL;
+ }
+
+ if (!mutex_trylock(&list->mtx)) {
+ printk(KERN_ERR "Link List in use\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Empty out all of the entries from the dma list. We need to go to the
+ * low level driver as dma entries are driver specific.
+ */
+ retval = bridge->dma_list_empty(list);
+ if (retval) {
+ printk(KERN_ERR "Unable to empty link-list entries\n");
+ mutex_unlock(&list->mtx);
+ return retval;
+ }
+ mutex_unlock(&list->mtx);
+ kfree(list);
+
+ return retval;
+}
+EXPORT_SYMBOL(vme_dma_list_free);
+
+int vme_dma_free(struct vme_resource *resource)
+{
+ struct vme_dma_resource *ctrlr;
+
+ if (resource->type != VME_DMA) {
+ printk(KERN_ERR "Not a DMA resource\n");
+ return -EINVAL;
+ }
+
+ ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
+
+ if (!mutex_trylock(&ctrlr->mtx)) {
+ printk(KERN_ERR "Resource busy, can't free\n");
+ return -EBUSY;
+ }
+
+ if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
+ printk(KERN_WARNING "Resource still processing transfers\n");
+ mutex_unlock(&ctrlr->mtx);
+ return -EBUSY;
+ }
+
+ ctrlr->locked = 0;
+
+ mutex_unlock(&ctrlr->mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(vme_dma_free);
+
+void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
+{
+ void (*call)(int, int, void *);
+ void *priv_data;
+
+ call = bridge->irq[level - 1].callback[statid].func;
+ priv_data = bridge->irq[level - 1].callback[statid].priv_data;
+
+ if (call != NULL)
+ call(level, statid, priv_data);
+ else
+ printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n",
+ level, statid);
+}
+EXPORT_SYMBOL(vme_irq_handler);
+
+int vme_irq_request(struct vme_dev *vdev, int level, int statid,
+ void (*callback)(int, int, void *),
+ void *priv_data)
+{
+ struct vme_bridge *bridge;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find VME bus\n");
+ return -EINVAL;
+ }
+
+ if ((level < 1) || (level > 7)) {
+ printk(KERN_ERR "Invalid interrupt level\n");
+ return -EINVAL;
+ }
+
+ if (bridge->irq_set == NULL) {
+ printk(KERN_ERR "Configuring interrupts not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&bridge->irq_mtx);
+
+ if (bridge->irq[level - 1].callback[statid].func) {
+ mutex_unlock(&bridge->irq_mtx);
+ printk(KERN_WARNING "VME Interrupt already taken\n");
+ return -EBUSY;
+ }
+
+ bridge->irq[level - 1].count++;
+ bridge->irq[level - 1].callback[statid].priv_data = priv_data;
+ bridge->irq[level - 1].callback[statid].func = callback;
+
+ /* Enable IRQ level */
+ bridge->irq_set(bridge, level, 1, 1);
+
+ mutex_unlock(&bridge->irq_mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(vme_irq_request);
+
+void vme_irq_free(struct vme_dev *vdev, int level, int statid)
+{
+ struct vme_bridge *bridge;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find VME bus\n");
+ return;
+ }
+
+ if ((level < 1) || (level > 7)) {
+ printk(KERN_ERR "Invalid interrupt level\n");
+ return;
+ }
+
+ if (bridge->irq_set == NULL) {
+ printk(KERN_ERR "Configuring interrupts not supported\n");
+ return;
+ }
+
+ mutex_lock(&bridge->irq_mtx);
+
+ bridge->irq[level - 1].count--;
+
+ /* Disable IRQ level if no more interrupts attached at this level*/
+ if (bridge->irq[level - 1].count == 0)
+ bridge->irq_set(bridge, level, 0, 1);
+
+ bridge->irq[level - 1].callback[statid].func = NULL;
+ bridge->irq[level - 1].callback[statid].priv_data = NULL;
+
+ mutex_unlock(&bridge->irq_mtx);
+}
+EXPORT_SYMBOL(vme_irq_free);
+
+int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
+{
+ struct vme_bridge *bridge;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find VME bus\n");
+ return -EINVAL;
+ }
+
+ if ((level < 1) || (level > 7)) {
+ printk(KERN_WARNING "Invalid interrupt level\n");
+ return -EINVAL;
+ }
+
+ if (bridge->irq_generate == NULL) {
+ printk(KERN_WARNING "Interrupt generation not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->irq_generate(bridge, level, statid);
+}
+EXPORT_SYMBOL(vme_irq_generate);
+
+/*
+ * Request the location monitor, return resource or NULL
+ */
+struct vme_resource *vme_lm_request(struct vme_dev *vdev)
+{
+ struct vme_bridge *bridge;
+ struct list_head *lm_pos = NULL;
+ struct vme_lm_resource *allocated_lm = NULL;
+ struct vme_lm_resource *lm = NULL;
+ struct vme_resource *resource = NULL;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find VME bus\n");
+ goto err_bus;
+ }
+
+ /* Loop through DMA resources */
+ list_for_each(lm_pos, &bridge->lm_resources) {
+ lm = list_entry(lm_pos,
+ struct vme_lm_resource, list);
+
+ if (lm == NULL) {
+ printk(KERN_ERR "Registered NULL Location Monitor resource\n");
+ continue;
+ }
+
+ /* Find an unlocked controller */
+ mutex_lock(&lm->mtx);
+ if (lm->locked == 0) {
+ lm->locked = 1;
+ mutex_unlock(&lm->mtx);
+ allocated_lm = lm;
+ break;
+ }
+ mutex_unlock(&lm->mtx);
+ }
+
+ /* Check to see if we found a resource */
+ if (allocated_lm == NULL)
+ goto err_lm;
+
+ resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
+ if (resource == NULL) {
+ printk(KERN_ERR "Unable to allocate resource structure\n");
+ goto err_alloc;
+ }
+ resource->type = VME_LM;
+ resource->entry = &allocated_lm->list;
+
+ return resource;
+
+err_alloc:
+ /* Unlock image */
+ mutex_lock(&lm->mtx);
+ lm->locked = 0;
+ mutex_unlock(&lm->mtx);
+err_lm:
+err_bus:
+ return NULL;
+}
+EXPORT_SYMBOL(vme_lm_request);
+
+int vme_lm_count(struct vme_resource *resource)
+{
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return -EINVAL;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ return lm->monitors;
+}
+EXPORT_SYMBOL(vme_lm_count);
+
+int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
+ u32 aspace, u32 cycle)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return -EINVAL;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ if (bridge->lm_set == NULL) {
+ printk(KERN_ERR "vme_lm_set not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->lm_set(lm, lm_base, aspace, cycle);
+}
+EXPORT_SYMBOL(vme_lm_set);
+
+int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
+ u32 *aspace, u32 *cycle)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return -EINVAL;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ if (bridge->lm_get == NULL) {
+ printk(KERN_ERR "vme_lm_get not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->lm_get(lm, lm_base, aspace, cycle);
+}
+EXPORT_SYMBOL(vme_lm_get);
+
+int vme_lm_attach(struct vme_resource *resource, int monitor,
+ void (*callback)(int))
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return -EINVAL;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ if (bridge->lm_attach == NULL) {
+ printk(KERN_ERR "vme_lm_attach not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->lm_attach(lm, monitor, callback);
+}
+EXPORT_SYMBOL(vme_lm_attach);
+
+int vme_lm_detach(struct vme_resource *resource, int monitor)
+{
+ struct vme_bridge *bridge = find_bridge(resource);
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return -EINVAL;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ if (bridge->lm_detach == NULL) {
+ printk(KERN_ERR "vme_lm_detach not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->lm_detach(lm, monitor);
+}
+EXPORT_SYMBOL(vme_lm_detach);
+
+void vme_lm_free(struct vme_resource *resource)
+{
+ struct vme_lm_resource *lm;
+
+ if (resource->type != VME_LM) {
+ printk(KERN_ERR "Not a Location Monitor resource\n");
+ return;
+ }
+
+ lm = list_entry(resource->entry, struct vme_lm_resource, list);
+
+ mutex_lock(&lm->mtx);
+
+ /* XXX
+ * Check to see that there aren't any callbacks still attached, if
+ * there are we should probably be detaching them!
+ */
+
+ lm->locked = 0;
+
+ mutex_unlock(&lm->mtx);
+
+ kfree(resource);
+}
+EXPORT_SYMBOL(vme_lm_free);
+
+int vme_slot_get(struct vme_dev *vdev)
+{
+ struct vme_bridge *bridge;
+
+ bridge = vdev->bridge;
+ if (bridge == NULL) {
+ printk(KERN_ERR "Can't find VME bus\n");
+ return -EINVAL;
+ }
+
+ if (bridge->slot_get == NULL) {
+ printk(KERN_WARNING "vme_slot_get not supported\n");
+ return -EINVAL;
+ }
+
+ return bridge->slot_get(bridge);
+}
+EXPORT_SYMBOL(vme_slot_get);
+
+
+/* - Bridge Registration --------------------------------------------------- */
+
+static void vme_dev_release(struct device *dev)
+{
+ kfree(dev_to_vme_dev(dev));
+}
+
+int vme_register_bridge(struct vme_bridge *bridge)
+{
+ int i;
+ int ret = -1;
+
+ mutex_lock(&vme_buses_lock);
+ for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
+ if ((vme_bus_numbers & (1 << i)) == 0) {
+ vme_bus_numbers |= (1 << i);
+ bridge->num = i;
+ INIT_LIST_HEAD(&bridge->devices);
+ list_add_tail(&bridge->bus_list, &vme_bus_list);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&vme_buses_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vme_register_bridge);
+
+void vme_unregister_bridge(struct vme_bridge *bridge)
+{
+ struct vme_dev *vdev;
+ struct vme_dev *tmp;
+
+ mutex_lock(&vme_buses_lock);
+ vme_bus_numbers &= ~(1 << bridge->num);
+ list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
+ list_del(&vdev->drv_list);
+ list_del(&vdev->bridge_list);
+ device_unregister(&vdev->dev);
+ }
+ list_del(&bridge->bus_list);
+ mutex_unlock(&vme_buses_lock);
+}
+EXPORT_SYMBOL(vme_unregister_bridge);
+
+/* - Driver Registration --------------------------------------------------- */
+
+static int __vme_register_driver_bus(struct vme_driver *drv,
+ struct vme_bridge *bridge, unsigned int ndevs)
+{
+ int err;
+ unsigned int i;
+ struct vme_dev *vdev;
+ struct vme_dev *tmp;
+
+ for (i = 0; i < ndevs; i++) {
+ vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
+ if (!vdev) {
+ err = -ENOMEM;
+ goto err_devalloc;
+ }
+ vdev->num = i;
+ vdev->bridge = bridge;
+ vdev->dev.platform_data = drv;
+ vdev->dev.release = vme_dev_release;
+ vdev->dev.parent = bridge->parent;
+ vdev->dev.bus = &vme_bus_type;
+ dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
+ vdev->num);
+
+ err = device_register(&vdev->dev);
+ if (err)
+ goto err_reg;
+
+ if (vdev->dev.platform_data) {
+ list_add_tail(&vdev->drv_list, &drv->devices);
+ list_add_tail(&vdev->bridge_list, &bridge->devices);
+ } else
+ device_unregister(&vdev->dev);
+ }
+ return 0;
+
+err_reg:
+ kfree(vdev);
+err_devalloc:
+ list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
+ list_del(&vdev->drv_list);
+ list_del(&vdev->bridge_list);
+ device_unregister(&vdev->dev);
+ }
+ return err;
+}
+
+static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
+{
+ struct vme_bridge *bridge;
+ int err = 0;
+
+ mutex_lock(&vme_buses_lock);
+ list_for_each_entry(bridge, &vme_bus_list, bus_list) {
+ /*
+ * This cannot cause trouble as we already have vme_buses_lock
+ * and if the bridge is removed, it will have to go through
+ * vme_unregister_bridge() to do it (which calls remove() on
+ * the bridge which in turn tries to acquire vme_buses_lock and
+ * will have to wait).
+ */
+ err = __vme_register_driver_bus(drv, bridge, ndevs);
+ if (err)
+ break;
+ }
+ mutex_unlock(&vme_buses_lock);
+ return err;
+}
+
+int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
+{
+ int err;
+
+ drv->driver.name = drv->name;
+ drv->driver.bus = &vme_bus_type;
+ INIT_LIST_HEAD(&drv->devices);
+
+ err = driver_register(&drv->driver);
+ if (err)
+ return err;
+
+ err = __vme_register_driver(drv, ndevs);
+ if (err)
+ driver_unregister(&drv->driver);
+
+ return err;
+}
+EXPORT_SYMBOL(vme_register_driver);
+
+void vme_unregister_driver(struct vme_driver *drv)
+{
+ struct vme_dev *dev, *dev_tmp;
+
+ mutex_lock(&vme_buses_lock);
+ list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
+ list_del(&dev->drv_list);
+ list_del(&dev->bridge_list);
+ device_unregister(&dev->dev);
+ }
+ mutex_unlock(&vme_buses_lock);
+
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(vme_unregister_driver);
+
+/* - Bus Registration ------------------------------------------------------ */
+
+static int vme_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct vme_driver *vme_drv;
+
+ vme_drv = container_of(drv, struct vme_driver, driver);
+
+ if (dev->platform_data == vme_drv) {
+ struct vme_dev *vdev = dev_to_vme_dev(dev);
+
+ if (vme_drv->match && vme_drv->match(vdev))
+ return 1;
+
+ dev->platform_data = NULL;
+ }
+ return 0;
+}
+
+static int vme_bus_probe(struct device *dev)
+{
+ int retval = -ENODEV;
+ struct vme_driver *driver;
+ struct vme_dev *vdev = dev_to_vme_dev(dev);
+
+ driver = dev->platform_data;
+
+ if (driver->probe != NULL)
+ retval = driver->probe(vdev);
+
+ return retval;
+}
+
+static int vme_bus_remove(struct device *dev)
+{
+ int retval = -ENODEV;
+ struct vme_driver *driver;
+ struct vme_dev *vdev = dev_to_vme_dev(dev);
+
+ driver = dev->platform_data;
+
+ if (driver->remove != NULL)
+ retval = driver->remove(vdev);
+
+ return retval;
+}
+
+struct bus_type vme_bus_type = {
+ .name = "vme",
+ .match = vme_bus_match,
+ .probe = vme_bus_probe,
+ .remove = vme_bus_remove,
+};
+EXPORT_SYMBOL(vme_bus_type);
+
+static int __init vme_init(void)
+{
+ return bus_register(&vme_bus_type);
+}
+
+static void __exit vme_exit(void)
+{
+ bus_unregister(&vme_bus_type);
+}
+
+MODULE_DESCRIPTION("VME bridge driver framework");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
+MODULE_LICENSE("GPL");
+
+module_init(vme_init);
+module_exit(vme_exit);
--- /dev/null
+ VME Device Driver API
+ =====================
+
+Driver registration
+===================
+
+As with other subsystems within the Linux kernel, VME device drivers register
+with the VME subsystem, typically from the device driver's init routine. This
+is achieved via a call to the following function:
+
+ int vme_register_driver (struct vme_driver *driver, unsigned int ndevs);
+
+If driver registration is successful this function returns zero; if an error
+occurs, a negative error code will be returned. The 'ndevs' argument specifies
+the maximum number of devices the driver expects to support; the driver's
+match routine is called up to this many times for each registered bridge.
+
+A pointer to a structure of type 'vme_driver' must be provided to the
+registration function. The structure is as follows:
+
+ struct vme_driver {
+ struct list_head node;
+ const char *name;
+ int (*match)(struct vme_dev *);
+ int (*probe)(struct vme_dev *);
+ int (*remove)(struct vme_dev *);
+ void (*shutdown)(void);
+ struct device_driver driver;
+ struct list_head devices;
+ unsigned int ndev;
+ };
+
+At a minimum, the '.name', '.match' and '.probe' elements of this structure
+should be correctly set. The '.name' element is a pointer to a string holding
+the device driver's name.
+
+The '.match' function allows the driver to control which of the devices
+offered to it are bound and probed. The match function should return 1 if a
+device should be probed and 0 otherwise. This example match function (from
+vme_user.c) limits the number of devices probed to one:
+
+ #define USER_BUS_MAX 1
+ ...
+ static int vme_user_match(struct vme_dev *vdev)
+ {
+ if (vdev->num >= USER_BUS_MAX)
+ return 0;
+ return 1;
+ }
+
+The '.probe' element should contain a pointer to the probe routine. The
+probe routine is passed a 'struct vme_dev' pointer as an argument. The
+'struct vme_dev' structure looks like the following:
+
+ struct vme_dev {
+ int num;
+ struct vme_bridge *bridge;
+ struct device dev;
+ struct list_head drv_list;
+ struct list_head bridge_list;
+ };
+
+Here, the 'num' field refers to the sequential device ID for this specific
+driver. The bridge number (or bus number) can be accessed using
+dev->bridge->num.
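+
+For illustration, a minimal probe routine (the function name 'my_driver_probe'
+and the log message are hypothetical, not part of the API) might simply record
+which device and bridge it has been handed:
+
+	/* Example probe callback; name and message are illustrative */
+	static int my_driver_probe(struct vme_dev *vdev)
+	{
+		dev_info(&vdev->dev, "probed device %d on VME bus %d\n",
+			vdev->num, vdev->bridge->num);
+		return 0;
+	}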
+
+A function is also provided to unregister the driver from the VME core and is
+usually called from the device driver's exit routine:
+
+ void vme_unregister_driver (struct vme_driver *driver);
+
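+Putting the pieces together, a skeleton module init/exit pair might look as
+follows (driver and callback names are hypothetical, with match, probe and
+remove routines along the lines sketched above):
+
+	/* Illustrative driver definition; callbacks as sketched above */
+	static struct vme_driver my_driver = {
+		.name = "my_vme_driver",
+		.match = my_driver_match,
+		.probe = my_driver_probe,
+		.remove = my_driver_remove,
+	};
+
+	static int __init my_driver_init(void)
+	{
+		/* Offer at most one device per VME bridge to the match routine */
+		return vme_register_driver(&my_driver, 1);
+	}
+
+	static void __exit my_driver_exit(void)
+	{
+		vme_unregister_driver(&my_driver);
+	}
+
+	module_init(my_driver_init);
+	module_exit(my_driver_exit);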
+
+Resource management
+===================
+
+Once a driver has registered with the VME core, the provided match routine will
+be called the number of times specified during the registration. If a match
+succeeds, a non-zero value should be returned; a zero return value indicates
+failure. For each successful match, the probe routine of the corresponding
+driver is called. The probe routine is passed a pointer to the device's
+device structure. This pointer should be saved; it will be required when
+requesting VME resources.
+
+The driver can request ownership of one or more master windows, slave windows
+and/or DMA channels. Rather than allowing the device driver to request a
+specific window or DMA channel (which may already be used by a different
+driver), the VME core assigns a resource based on the attributes required by
+the driver in question:
+
+ struct vme_resource * vme_master_request(struct vme_dev *dev,
+ u32 aspace, u32 cycle, u32 width);
+
+ struct vme_resource * vme_slave_request(struct vme_dev *dev, u32 aspace,
+ u32 cycle);
+
+ struct vme_resource *vme_dma_request(struct vme_dev *dev, u32 route);
+
+For slave windows these attributes are split into the VME address spaces that
+need to be accessed in 'aspace' and the VME bus cycle types required in
+'cycle'. Master windows add a further set of attributes in 'width' specifying
+the required data transfer widths. These attributes are defined as bitmasks,
+so any combination of the attributes can be requested for a single window. The
+core will assign a window that meets the requirements and return a pointer of
+type vme_resource, which should be used to identify the allocated resource in
+subsequent calls. For DMA controllers, the request function requires the
+potential direction of any transfers to be provided in the route attributes.
+This is typically VME-to-MEM and/or MEM-to-VME, though some hardware can
+support VME-to-VME and MEM-to-MEM transfers as well as test pattern generation.
+If an unallocated resource fitting the requirements cannot be found, a NULL
+pointer will be returned.
+
+Functions are also provided to free window allocations once they are no longer
+required. These functions should be passed the pointer to the resource provided
+during resource allocation:
+
+ void vme_master_free(struct vme_resource *res);
+
+ void vme_slave_free(struct vme_resource *res);
+
+ void vme_dma_free(struct vme_resource *res);
+
+
+Master windows
+==============
+
+Master windows provide access from the local processor(s) out onto the VME bus.
+The number of windows available and the available access modes are dependent on
+the underlying chipset. A window must be configured before it can be used.
+
+
+Master window configuration
+---------------------------
+
+Once a master window has been assigned the following functions can be used to
+configure it and retrieve the current settings:
+
+ int vme_master_set (struct vme_resource *res, int enabled,
+ unsigned long long base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 width);
+
+ int vme_master_get (struct vme_resource *res, int *enabled,
+ unsigned long long *base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *width);
+
+The address spaces, transfer widths and cycle types are the same as described
+under resource management; however, some of the options are mutually exclusive.
+For example, only one address space may be specified.
+
+These functions return 0 on success or an error code should the call fail.
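+
+As an illustrative sketch (the addresses, the size and the VME_D32 data width
+flag from vme.h are example values, not requirements), a driver might request
+an A24 window and map 64kB of the VME bus through it:
+
+	struct vme_resource *res;
+	int retval;
+
+	res = vme_master_request(vdev, VME_A24, VME_SCT, VME_D32);
+	if (res == NULL)
+		return -ENODEV;
+
+	/* Enable the window over VME A24 addresses 0x10000-0x1ffff (example) */
+	retval = vme_master_set(res, 1, 0x10000, 0x10000, VME_A24,
+		VME_SCT, VME_D32);
+	if (retval)
+		vme_master_free(res);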
+
+
+Master window access
+--------------------
+
+The following functions can be used to read from and write to configured master
+windows. These functions return the number of bytes copied:
+
+ ssize_t vme_master_read(struct vme_resource *res, void *buf,
+ size_t count, loff_t offset);
+
+ ssize_t vme_master_write(struct vme_resource *res, void *buf,
+ size_t count, loff_t offset);
+
+In addition to simple reads and writes, a function is provided to do a
+read-modify-write transaction. This function returns the original value of the
+VME bus location:
+
+ unsigned int vme_master_rmw (struct vme_resource *res,
+ unsigned int mask, unsigned int compare, unsigned int swap,
+ loff_t offset);
+
+This function works by reading from the given offset and applying the mask. If
+the bits selected by the mask match the corresponding bits of the compare
+value, the swap value is written to the specified offset.
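+
+For example (a sketch only, reusing the 'res' window requested in the
+configuration example above and abbreviating error handling):
+
+	u32 data;
+	ssize_t copied;
+
+	/* Read four bytes from offset 0x100 (example offset) in the window */
+	copied = vme_master_read(res, &data, sizeof(data), 0x100);
+	if (copied != sizeof(data))
+		dev_err(&vdev->dev, "VME read failed\n");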
+
+
+Slave windows
+=============
+
+Slave windows provide devices on the VME bus access into mapped portions of the
+local memory. The number of windows available and the access modes that can be
+used are dependent on the underlying chipset. A window must be configured before
+it can be used.
+
+
+Slave window configuration
+--------------------------
+
+Once a slave window has been assigned the following functions can be used to
+configure it and retrieve the current settings:
+
+ int vme_slave_set (struct vme_resource *res, int enabled,
+ unsigned long long base, unsigned long long size,
+ dma_addr_t mem, u32 aspace, u32 cycle);
+
+ int vme_slave_get (struct vme_resource *res, int *enabled,
+ unsigned long long *base, unsigned long long *size,
+ dma_addr_t *mem, u32 *aspace, u32 *cycle);
+
+The address spaces, transfer widths and cycle types are the same as described
+under resource management; however, some of the options are mutually exclusive.
+For example, only one address space may be specified.
+
+These functions return 0 on success or an error code should the call fail.
+
+
+Slave window buffer allocation
+------------------------------
+
+Functions are provided to allow the user to allocate and free contiguous
+buffers which will be accessible by the VME bridge. These functions do not have
+to be used; other methods can be used to allocate a buffer, though care must be
+taken to ensure that it is contiguous and accessible by the VME bridge:
+
+ void * vme_alloc_consistent(struct vme_resource *res, size_t size,
+ dma_addr_t *mem);
+
+ void vme_free_consistent(struct vme_resource *res, size_t size,
+ void *virt, dma_addr_t mem);
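+
+An illustrative sequence (addresses and sizes are arbitrary example values)
+requests a slave window, allocates a buffer the bridge can reach and then
+exposes it on the bus:
+
+	struct vme_resource *slave;
+	dma_addr_t bus_addr;
+	void *buf;
+	int retval;
+
+	slave = vme_slave_request(vdev, VME_A24, VME_SCT);
+	if (slave == NULL)
+		return -ENODEV;
+
+	buf = vme_alloc_consistent(slave, 0x10000, &bus_addr);
+	if (buf == NULL) {
+		vme_slave_free(slave);
+		return -ENOMEM;
+	}
+
+	/* Make the buffer visible at VME A24 address 0x20000 (example) */
+	retval = vme_slave_set(slave, 1, 0x20000, 0x10000, bus_addr,
+		VME_A24, VME_SCT);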
+
+
+Slave window access
+-------------------
+
+Slave windows map local memory onto the VME bus, so the standard methods for
+accessing local memory should be used.
+
+
+DMA channels
+============
+
+The VME DMA API provides the ability to run linked-list DMA transfers. The
+API introduces the concept of DMA lists. Each DMA list is a linked list which can
+be passed to a DMA controller. Multiple lists can be created, extended,
+executed, reused and destroyed.
+
+
+List Management
+---------------
+
+The following functions are provided to create and destroy DMA lists. Execution
+of a list will not automatically destroy the list, thus enabling a list to be
+reused for repetitive tasks:
+
+ struct vme_dma_list *vme_new_dma_list(struct vme_resource *res);
+
+ int vme_dma_list_free(struct vme_dma_list *list);
+
+
+List Population
+---------------
+
+An item can be added to a list using the following function (the source and
+destination attributes need to be created before calling this function; this is
+covered under "Transfer Attributes"):
+
+ int vme_dma_list_add(struct vme_dma_list *list,
+ struct vme_dma_attr *src, struct vme_dma_attr *dest,
+ size_t count);
+
+NOTE: The detailed attributes of the transfer's source and destination
+ are not checked until an entry is added to a DMA list, the request
+ for a DMA channel purely checks the directions in which the
+ controller is expected to transfer data. As a result it is
+ possible for this call to return an error, for example if the
+ source or destination is in an unsupported VME address space.
+
+Transfer Attributes
+-------------------
+
+The attributes for the source and destination are handled separately from adding
+an item to a list. This is due to the diverse attributes required for each type
+of source and destination. There are functions to create attributes for PCI, VME
+and pattern sources and destinations (where appropriate):
+
+Pattern source:
+
+ struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
+
+PCI source or destination:
+
+ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t mem);
+
+VME source or destination:
+
+ struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
+ u32 aspace, u32 cycle, u32 width);
+
+The following function should be used to free an attribute:
+
+ void vme_dma_free_attribute(struct vme_dma_attr *attr);
+
+
+List Execution
+--------------
+
+The following function queues a list for execution. The function will return
+once the list has been executed:
+
+ int vme_dma_list_exec(struct vme_dma_list *list);
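+
+The following sketch copies 4kB from the VME bus into a local buffer. Error
+handling is abbreviated; 'buf_dma' is assumed to be the bus address of a local
+buffer obtained elsewhere (for example from vme_alloc_consistent() or the DMA
+mapping API), and VME_DMA_VME_TO_MEM is the route flag defined in vme.h:
+
+	struct vme_resource *dma_res;
+	struct vme_dma_list *list;
+	struct vme_dma_attr *src, *dest;
+	int retval;
+
+	dma_res = vme_dma_request(vdev, VME_DMA_VME_TO_MEM);
+	if (dma_res == NULL)
+		return -ENODEV;
+
+	list = vme_new_dma_list(dma_res);
+
+	/* Source: VME A32 address 0x100000 (example); destination: buf_dma */
+	src = vme_dma_vme_attribute(0x100000, VME_A32, VME_SCT, VME_D32);
+	dest = vme_dma_pci_attribute(buf_dma);
+
+	retval = vme_dma_list_add(list, src, dest, 0x1000);
+	if (!retval)
+		retval = vme_dma_list_exec(list);
+
+	vme_dma_free_attribute(src);
+	vme_dma_free_attribute(dest);
+	vme_dma_list_free(list);
+	vme_dma_free(dma_res);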
+
+
+Interrupts
+==========
+
+The VME API provides functions to attach and detach callbacks to specific VME
+level and status ID combinations and for the generation of VME interrupts with
+specific VME level and status IDs.
+
+
+Attaching Interrupt Handlers
+----------------------------
+
+The following functions can be used to attach and free a specific VME level and
+status ID combination. Any given combination can only be assigned a single
+callback function. A void pointer parameter is provided, the value of which is
+passed to the callback function; the use of this pointer is left to the user:
+
+ int vme_irq_request(struct vme_dev *dev, int level, int statid,
+ void (*callback)(int, int, void *), void *priv);
+
+ void vme_irq_free(struct vme_dev *dev, int level, int statid);
+
+The callback parameters are as follows. Care must be taken when writing a
+callback function; callback functions run in interrupt context:
+
+ void callback(int level, int statid, void *priv);
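+
+For example (the callback name, level and status/ID values are illustrative), a
+driver might attach a handler to VME interrupt level 3, status/ID 0x20, passing
+its device pointer as the private data:
+
+	/* Runs in interrupt context - keep the work done here short */
+	static void my_irq_callback(int level, int statid, void *priv)
+	{
+		struct device *dev = priv;
+
+		dev_info(dev, "VME interrupt level %d, statid 0x%x\n",
+			level, statid);
+	}
+
+	...
+	retval = vme_irq_request(vdev, 3, 0x20, my_irq_callback, &vdev->dev);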
+
+
+Interrupt Generation
+--------------------
+
+The following function can be used to generate a VME interrupt at a given VME
+level and VME status ID:
+
+ int vme_irq_generate(struct vme_dev *dev, int level, int statid);
+
+
+Location monitors
+=================
+
+The VME API provides the following functionality to configure the location
+monitor.
+
+
+Location Monitor Management
+---------------------------
+
+The following functions are provided to request the use of a block of location
+monitors and to free them after they are no longer required:
+
+ struct vme_resource * vme_lm_request(struct vme_dev *dev);
+
+ void vme_lm_free(struct vme_resource * res);
+
+Each block may provide a number of location monitors, monitoring adjacent
+locations. The following function can be used to determine how many locations
+are provided:
+
+ int vme_lm_count(struct vme_resource * res);
+
+
+Location Monitor Configuration
+------------------------------
+
+Once a bank of location monitors has been allocated, the following functions
+are provided to configure the location and mode of the location monitor:
+
+ int vme_lm_set(struct vme_resource *res, unsigned long long base,
+ u32 aspace, u32 cycle);
+
+ int vme_lm_get(struct vme_resource *res, unsigned long long *base,
+ u32 *aspace, u32 *cycle);
+
+
+Location Monitor Use
+--------------------
+
+The following functions allow a callback to be attached to and detached from
+each monitored location. Each location monitor block can monitor a number of
+adjacent locations:
+
+ int vme_lm_attach(struct vme_resource *res, int num,
+ void (*callback)(int));
+
+ int vme_lm_detach(struct vme_resource *res, int num);
+
+The callback function is declared as follows:
+
+ void callback(int num);
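+
+A sketch of typical usage (the callback name, the monitored A16 address and the
+VME_USER and VME_DATA cycle flags from vme.h are illustrative choices):
+
+	static void my_lm_callback(int num)
+	{
+		pr_info("location monitor %d triggered\n", num);
+	}
+
+	...
+	struct vme_resource *lm;
+	int count;
+
+	lm = vme_lm_request(vdev);
+	if (lm == NULL)
+		return -ENODEV;
+
+	/* 'count' monitors are provided by this block */
+	count = vme_lm_count(lm);
+
+	/* Watch data accesses starting at A16 address 0x1000 (example) */
+	vme_lm_set(lm, 0x1000, VME_A16, VME_USER | VME_DATA);
+	vme_lm_attach(lm, 0, my_lm_callback);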
+
+
+Slot Detection
+==============
+
+This function returns the slot ID of the provided bridge:
+
+ int vme_slot_get(struct vme_dev *dev);
--- /dev/null
+#ifndef _VME_BRIDGE_H_
+#define _VME_BRIDGE_H_
+
+#define VME_CRCSR_BUF_SIZE (508*1024)
+/*
+ * Resource structures
+ */
+struct vme_master_resource {
+ struct list_head list;
+ struct vme_bridge *parent;
+ /*
+ * We are likely to need to access the VME bus in interrupt context, so
+ * protect master routines with a spinlock rather than a mutex.
+ */
+ spinlock_t lock;
+ int locked;
+ int number;
+ u32 address_attr;
+ u32 cycle_attr;
+ u32 width_attr;
+ struct resource bus_resource;
+ void __iomem *kern_base;
+};
+
+struct vme_slave_resource {
+ struct list_head list;
+ struct vme_bridge *parent;
+ struct mutex mtx;
+ int locked;
+ int number;
+ u32 address_attr;
+ u32 cycle_attr;
+};
+
+struct vme_dma_pattern {
+ u32 pattern;
+ u32 type;
+};
+
+struct vme_dma_pci {
+ dma_addr_t address;
+};
+
+struct vme_dma_vme {
+ unsigned long long address;
+ u32 aspace;
+ u32 cycle;
+ u32 dwidth;
+};
+
+struct vme_dma_list {
+ struct list_head list;
+ struct vme_dma_resource *parent;
+ struct list_head entries;
+ struct mutex mtx;
+};
+
+struct vme_dma_resource {
+ struct list_head list;
+ struct vme_bridge *parent;
+ struct mutex mtx;
+ int locked;
+ int number;
+ struct list_head pending;
+ struct list_head running;
+ u32 route_attr;
+};
+
+struct vme_lm_resource {
+ struct list_head list;
+ struct vme_bridge *parent;
+ struct mutex mtx;
+ int locked;
+ int number;
+ int monitors;
+};
+
+struct vme_bus_error {
+ struct list_head list;
+ unsigned long long address;
+ u32 attributes;
+};
+
+struct vme_callback {
+ void (*func)(int, int, void*);
+ void *priv_data;
+};
+
+struct vme_irq {
+ int count;
+ struct vme_callback callback[255];
+};
+
+/* Allow 16 characters for name (including null character) */
+#define VMENAMSIZ 16
+
+/*
+ * This structure stores all the information about one bridge. The structure
+ * should be dynamically allocated by the driver; one instance should exist
+ * for each VME chip present in the system.
+ */
+struct vme_bridge {
+ char name[VMENAMSIZ];
+ int num;
+ struct list_head master_resources;
+ struct list_head slave_resources;
+ struct list_head dma_resources;
+ struct list_head lm_resources;
+
+ struct list_head vme_errors; /* List for errors generated on VME */
+ struct list_head devices; /* List of devices on this bridge */
+
+ /* Bridge Info - XXX Move to private structure? */
+ struct device *parent; /* Parent device (eg. pdev->dev for PCI) */
+ void *driver_priv; /* Private pointer for the bridge driver */
+ struct list_head bus_list; /* list of VME buses */
+
+ /* Interrupt callbacks */
+ struct vme_irq irq[7];
+ /* Locking for VME irq callback configuration */
+ struct mutex irq_mtx;
+
+ /* Slave Functions */
+ int (*slave_get) (struct vme_slave_resource *, int *,
+ unsigned long long *, unsigned long long *, dma_addr_t *,
+ u32 *, u32 *);
+ int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
+ unsigned long long, dma_addr_t, u32, u32);
+
+ /* Master Functions */
+ int (*master_get) (struct vme_master_resource *, int *,
+ unsigned long long *, unsigned long long *, u32 *, u32 *,
+ u32 *);
+ int (*master_set) (struct vme_master_resource *, int,
+ unsigned long long, unsigned long long, u32, u32, u32);
+ ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
+ loff_t);
+ ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
+ loff_t);
+ unsigned int (*master_rmw) (struct vme_master_resource *, unsigned int,
+ unsigned int, unsigned int, loff_t);
+
+ /* DMA Functions */
+ int (*dma_list_add) (struct vme_dma_list *, struct vme_dma_attr *,
+ struct vme_dma_attr *, size_t);
+ int (*dma_list_exec) (struct vme_dma_list *);
+ int (*dma_list_empty) (struct vme_dma_list *);
+
+ /* Interrupt Functions */
+ void (*irq_set) (struct vme_bridge *, int, int, int);
+ int (*irq_generate) (struct vme_bridge *, int, int);
+
+ /* Location monitor functions */
+ int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
+ int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
+ u32 *);
+ int (*lm_attach) (struct vme_lm_resource *, int,
+ void (*callback)(int));
+ int (*lm_detach) (struct vme_lm_resource *, int);
+
+ /* CR/CSR space functions */
+ int (*slot_get) (struct vme_bridge *);
+
+ /* Bridge parent interface */
+ void *(*alloc_consistent)(struct device *dev, size_t size,
+ dma_addr_t *dma);
+ void (*free_consistent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma);
+};
+
+void vme_irq_handler(struct vme_bridge *, int, int);
+
+int vme_register_bridge(struct vme_bridge *);
+void vme_unregister_bridge(struct vme_bridge *);
+
+#endif /* _VME_BRIDGE_H_ */
--- /dev/null
+#ifndef _VME_H_
+#define _VME_H_
+
+/* Resource Type */
+enum vme_resource_type {
+ VME_MASTER,
+ VME_SLAVE,
+ VME_DMA,
+ VME_LM
+};
+
+/* VME Address Spaces */
+#define VME_A16 0x1
+#define VME_A24 0x2
+#define VME_A32 0x4
+#define VME_A64 0x8
+#define VME_CRCSR 0x10
+#define VME_USER1 0x20
+#define VME_USER2 0x40
+#define VME_USER3 0x80
+#define VME_USER4 0x100
+
+#define VME_A16_MAX 0x10000ULL
+#define VME_A24_MAX 0x1000000ULL
+#define VME_A32_MAX 0x100000000ULL
+#define VME_A64_MAX 0x10000000000000000ULL
+#define VME_CRCSR_MAX 0x1000000ULL
+
+
+/* VME Cycle Types */
+#define VME_SCT 0x1
+#define VME_BLT 0x2
+#define VME_MBLT 0x4
+#define VME_2eVME 0x8
+#define VME_2eSST 0x10
+#define VME_2eSSTB 0x20
+
+#define VME_2eSST160 0x100
+#define VME_2eSST267 0x200
+#define VME_2eSST320 0x400
+
+#define VME_SUPER 0x1000
+#define VME_USER 0x2000
+#define VME_PROG 0x4000
+#define VME_DATA 0x8000
+
+/* VME Data Widths */
+#define VME_D8 0x1
+#define VME_D16 0x2
+#define VME_D32 0x4
+#define VME_D64 0x8
+
+/* Arbitration Scheduling Modes */
+#define VME_R_ROBIN_MODE 0x1
+#define VME_PRIORITY_MODE 0x2
+
+#define VME_DMA_PATTERN (1<<0)
+#define VME_DMA_PCI (1<<1)
+#define VME_DMA_VME (1<<2)
+
+#define VME_DMA_PATTERN_BYTE (1<<0)
+#define VME_DMA_PATTERN_WORD (1<<1)
+#define VME_DMA_PATTERN_INCREMENT (1<<2)
+
+#define VME_DMA_VME_TO_MEM (1<<0)
+#define VME_DMA_MEM_TO_VME (1<<1)
+#define VME_DMA_VME_TO_VME (1<<2)
+#define VME_DMA_MEM_TO_MEM (1<<3)
+#define VME_DMA_PATTERN_TO_VME (1<<4)
+#define VME_DMA_PATTERN_TO_MEM (1<<5)
+
+struct vme_dma_attr {
+ u32 type;
+ void *private;
+};
+
+struct vme_resource {
+ enum vme_resource_type type;
+ struct list_head *entry;
+};
+
+extern struct bus_type vme_bus_type;
+
+/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
+#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
+#define VME_MAX_SLOTS 32
+
+#define VME_SLOT_CURRENT -1
+#define VME_SLOT_ALL -2
+
+/**
+ * struct vme_dev - Structure representing a VME device
+ * @num: The device number
+ * @bridge: Pointer to the bridge device this device is on
+ * @dev: Internal device structure
+ * @drv_list: List of devices (per driver)
+ * @bridge_list: List of devices (per bridge)
+ */
+struct vme_dev {
+ int num;
+ struct vme_bridge *bridge;
+ struct device dev;
+ struct list_head drv_list;
+ struct list_head bridge_list;
+};
+
+struct vme_driver {
+ struct list_head node;
+ const char *name;
+ int (*match)(struct vme_dev *);
+ int (*probe)(struct vme_dev *);
+ int (*remove)(struct vme_dev *);
+ void (*shutdown)(void);
+ struct device_driver driver;
+ struct list_head devices;
+};
+
+void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
+void vme_free_consistent(struct vme_resource *, size_t, void *,
+ dma_addr_t);
+
+size_t vme_get_size(struct vme_resource *);
+
+struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
+int vme_slave_set(struct vme_resource *, int, unsigned long long,
+ unsigned long long, dma_addr_t, u32, u32);
+int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
+void vme_slave_free(struct vme_resource *);
+
+struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
+int vme_master_set(struct vme_resource *, int, unsigned long long,
+ unsigned long long, u32, u32, u32);
+int vme_master_get(struct vme_resource *, int *, unsigned long long *,
+ unsigned long long *, u32 *, u32 *, u32 *);
+ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
+ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
+unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
+ unsigned int, loff_t);
+void vme_master_free(struct vme_resource *);
+
+struct vme_resource *vme_dma_request(struct vme_dev *, u32);
+struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
+struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32);
+struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
+struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
+void vme_dma_free_attribute(struct vme_dma_attr *);
+int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
+ struct vme_dma_attr *, size_t);
+int vme_dma_list_exec(struct vme_dma_list *);
+int vme_dma_list_free(struct vme_dma_list *);
+int vme_dma_free(struct vme_resource *);
+
+int vme_irq_request(struct vme_dev *, int, int,
+ void (*callback)(int, int, void *), void *);
+void vme_irq_free(struct vme_dev *, int, int);
+int vme_irq_generate(struct vme_dev *, int, int);
+
+struct vme_resource *vme_lm_request(struct vme_dev *);
+int vme_lm_count(struct vme_resource *);
+int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
+int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
+int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
+int vme_lm_detach(struct vme_resource *, int);
+void vme_lm_free(struct vme_resource *);
+
+int vme_slot_get(struct vme_dev *);
+
+int vme_register_driver(struct vme_driver *, unsigned int);
+void vme_unregister_driver(struct vme_driver *);
+
+#endif /* _VME_H_ */