/*
 * Support for the Tundra TSI148 VME-PCI Bridge Chip
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
30 #include <asm/uaccess.h>
33 #include "../vme_bridge.h"
34 #include "vme_tsi148.h"
36 static int __init
tsi148_init(void);
37 static int tsi148_probe(struct pci_dev
*, const struct pci_device_id
*);
38 static void tsi148_remove(struct pci_dev
*);
39 static void __exit
tsi148_exit(void);
42 int tsi148_slave_set(struct vme_slave_resource
*, int, unsigned long long,
43 unsigned long long, dma_addr_t
, vme_address_t
, vme_cycle_t
);
44 int tsi148_slave_get(struct vme_slave_resource
*, int *, unsigned long long *,
45 unsigned long long *, dma_addr_t
*, vme_address_t
*, vme_cycle_t
*);
47 int tsi148_master_get(struct vme_master_resource
*, int *, unsigned long long *,
48 unsigned long long *, vme_address_t
*, vme_cycle_t
*, vme_width_t
*);
49 int tsi148_master_set(struct vme_master_resource
*, int, unsigned long long,
50 unsigned long long, vme_address_t
, vme_cycle_t
, vme_width_t
);
51 ssize_t
tsi148_master_read(struct vme_master_resource
*, void *, size_t,
53 ssize_t
tsi148_master_write(struct vme_master_resource
*, void *, size_t,
55 unsigned int tsi148_master_rmw(struct vme_master_resource
*, unsigned int,
56 unsigned int, unsigned int, loff_t
);
57 int tsi148_dma_list_add (struct vme_dma_list
*, struct vme_dma_attr
*,
58 struct vme_dma_attr
*, size_t);
59 int tsi148_dma_list_exec(struct vme_dma_list
*);
60 int tsi148_dma_list_empty(struct vme_dma_list
*);
61 int tsi148_generate_irq(int, int);
62 int tsi148_slot_get(void);
/* XXX These should all be in a per device structure */
/* Single-instance bridge state; shared by the interrupt handlers below. */
struct vme_bridge *tsi148_bridge;
/* One wait queue per DMA channel; woken from tsi148_DMA_irqhandler(). */
wait_queue_head_t dma_queue[2];
/* Woken from tsi148_IACK_irqhandler() when an IACK completes. */
wait_queue_head_t iack_queue;
void (*lm_callback[4])(int);	/* Called in interrupt handler, be careful! */
struct vme_master_resource *flush_image;
struct mutex vme_rmw;	/* Only one RMW cycle at a time */
struct mutex vme_int;	/*
			 * Only one VME interrupt can be
			 * generated at a time, provide locking
			 */
81 static char driver_name
[] = "vme_tsi148";
83 static struct pci_device_id tsi148_ids
[] = {
84 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA
, PCI_DEVICE_ID_TUNDRA_TSI148
) },
88 static struct pci_driver tsi148_driver
= {
90 .id_table
= tsi148_ids
,
91 .probe
= tsi148_probe
,
92 .remove
= tsi148_remove
,
/*
 * Combine two 32-bit register halves into one 64-bit value.
 * @high: upper 32 bits, @low: lower 32 bits, @variable: result out-param.
 */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	unsigned long long joined;

	joined = ((unsigned long long)high << 32) |
		(unsigned long long)low;
	*variable = joined;
}
/*
 * Split a 64-bit value into its two 32-bit register halves.
 * @variable: value to split, @high/@low: out-params for upper/lower 32 bits.
 */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*high = (unsigned int)(variable >> 32);
	*low = (unsigned int)(variable & 0xFFFFFFFF);
}
110 * Wakes up DMA queue.
112 static u32
tsi148_DMA_irqhandler(int channel_mask
)
116 if (channel_mask
& TSI148_LCSR_INTS_DMA0S
) {
117 wake_up(&dma_queue
[0]);
118 serviced
|= TSI148_LCSR_INTC_DMA0C
;
120 if (channel_mask
& TSI148_LCSR_INTS_DMA1S
) {
121 wake_up(&dma_queue
[1]);
122 serviced
|= TSI148_LCSR_INTC_DMA1C
;
129 * Wake up location monitor queue
131 static u32
tsi148_LM_irqhandler(u32 stat
)
136 for (i
= 0; i
< 4; i
++) {
137 if(stat
& TSI148_LCSR_INTS_LMS
[i
]) {
138 /* We only enable interrupts if the callback is set */
140 serviced
|= TSI148_LCSR_INTC_LMC
[i
];
148 * Wake up mail box queue.
150 * XXX This functionality is not exposed up though API.
152 static u32
tsi148_MB_irqhandler(u32 stat
)
158 for (i
= 0; i
< 4; i
++) {
159 if(stat
& TSI148_LCSR_INTS_MBS
[i
]) {
160 val
= ioread32be(tsi148_bridge
->base
+
161 TSI148_GCSR_MBOX
[i
]);
162 printk("VME Mailbox %d received: 0x%x\n", i
, val
);
163 serviced
|= TSI148_LCSR_INTC_MBC
[i
];
171 * Display error & status message when PERR (PCI) exception interrupt occurs.
173 static u32
tsi148_PERR_irqhandler(void)
176 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
177 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAU
),
178 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAL
),
179 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
)
182 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
183 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPXA
),
184 ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_EDPXS
)
187 iowrite32be(TSI148_LCSR_EDPAT_EDPCL
,
188 tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
);
190 return TSI148_LCSR_INTC_PERRC
;
194 * Save address and status when VME error interrupt occurs.
196 static u32
tsi148_VERR_irqhandler(void)
198 unsigned int error_addr_high
, error_addr_low
;
199 unsigned long long error_addr
;
201 struct vme_bus_error
*error
;
203 error_addr_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAU
);
204 error_addr_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAL
);
205 error_attrib
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
207 reg_join(error_addr_high
, error_addr_low
, &error_addr
);
209 /* Check for exception register overflow (we have lost error data) */
210 if(error_attrib
& TSI148_LCSR_VEAT_VEOF
) {
211 printk(KERN_ERR
"VME Bus Exception Overflow Occurred\n");
214 error
= (struct vme_bus_error
*)kmalloc(sizeof (struct vme_bus_error
),
217 error
->address
= error_addr
;
218 error
->attributes
= error_attrib
;
219 list_add_tail(&(error
->list
), &(tsi148_bridge
->vme_errors
));
222 "Unable to alloc memory for VMEbus Error reporting\n");
224 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
225 error_addr
, error_attrib
);
229 iowrite32be(TSI148_LCSR_VEAT_VESCL
,
230 tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
232 return TSI148_LCSR_INTC_VERRC
;
236 * Wake up IACK queue.
238 static u32
tsi148_IACK_irqhandler(void)
240 wake_up(&iack_queue
);
242 return TSI148_LCSR_INTC_IACKC
;
246 * Calling VME bus interrupt callback if provided.
248 static u32
tsi148_VIRQ_irqhandler(u32 stat
)
250 int vec
, i
, serviced
= 0;
252 for (i
= 7; i
> 0; i
--) {
253 if (stat
& (1 << i
)) {
255 * Note: Even though the registers are defined
256 * as 32-bits in the spec, we only want to issue
257 * 8-bit IACK cycles on the bus, read from offset
260 vec
= ioread8(tsi148_bridge
->base
+
261 TSI148_LCSR_VIACK
[i
] + 3);
263 vme_irq_handler(tsi148_bridge
, i
, vec
);
265 serviced
|= (1 << i
);
273 * Top level interrupt handler. Clears appropriate interrupt status bits and
274 * then calls appropriate sub handler(s).
276 static irqreturn_t
tsi148_irqhandler(int irq
, void *dev_id
)
278 u32 stat
, enable
, serviced
= 0;
280 /* Determine which interrupts are unmasked and set */
281 enable
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
282 stat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTS
);
284 /* Only look at unmasked interrupts */
287 if (unlikely(!stat
)) {
291 /* Call subhandlers as appropriate */
293 if (stat
& (TSI148_LCSR_INTS_DMA1S
| TSI148_LCSR_INTS_DMA0S
))
294 serviced
|= tsi148_DMA_irqhandler(stat
);
296 /* Location monitor irqs */
297 if (stat
& (TSI148_LCSR_INTS_LM3S
| TSI148_LCSR_INTS_LM2S
|
298 TSI148_LCSR_INTS_LM1S
| TSI148_LCSR_INTS_LM0S
))
299 serviced
|= tsi148_LM_irqhandler(stat
);
302 if (stat
& (TSI148_LCSR_INTS_MB3S
| TSI148_LCSR_INTS_MB2S
|
303 TSI148_LCSR_INTS_MB1S
| TSI148_LCSR_INTS_MB0S
))
304 serviced
|= tsi148_MB_irqhandler(stat
);
307 if (stat
& TSI148_LCSR_INTS_PERRS
)
308 serviced
|= tsi148_PERR_irqhandler();
311 if (stat
& TSI148_LCSR_INTS_VERRS
)
312 serviced
|= tsi148_VERR_irqhandler();
315 if (stat
& TSI148_LCSR_INTS_IACKS
)
316 serviced
|= tsi148_IACK_irqhandler();
319 if (stat
& (TSI148_LCSR_INTS_IRQ7S
| TSI148_LCSR_INTS_IRQ6S
|
320 TSI148_LCSR_INTS_IRQ5S
| TSI148_LCSR_INTS_IRQ4S
|
321 TSI148_LCSR_INTS_IRQ3S
| TSI148_LCSR_INTS_IRQ2S
|
322 TSI148_LCSR_INTS_IRQ1S
))
323 serviced
|= tsi148_VIRQ_irqhandler(stat
);
325 /* Clear serviced interrupts */
326 iowrite32be(serviced
, tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
331 static int tsi148_irq_init(struct vme_bridge
*bridge
)
335 struct pci_dev
*pdev
;
338 pdev
= container_of(bridge
->parent
, struct pci_dev
, dev
);
340 /* Initialise list for VME bus errors */
341 INIT_LIST_HEAD(&(bridge
->vme_errors
));
343 mutex_init(&(bridge
->irq_mtx
));
345 result
= request_irq(pdev
->irq
,
350 dev_err(&pdev
->dev
, "Can't get assigned pci irq vector %02X\n",
355 /* Enable and unmask interrupts */
356 tmp
= TSI148_LCSR_INTEO_DMA1EO
| TSI148_LCSR_INTEO_DMA0EO
|
357 TSI148_LCSR_INTEO_MB3EO
| TSI148_LCSR_INTEO_MB2EO
|
358 TSI148_LCSR_INTEO_MB1EO
| TSI148_LCSR_INTEO_MB0EO
|
359 TSI148_LCSR_INTEO_PERREO
| TSI148_LCSR_INTEO_VERREO
|
360 TSI148_LCSR_INTEO_IACKEO
;
362 /* XXX This leaves the following interrupts masked.
363 * TSI148_LCSR_INTEO_VIEEO
364 * TSI148_LCSR_INTEO_SYSFLEO
365 * TSI148_LCSR_INTEO_ACFLEO
368 /* Don't enable Location Monitor interrupts here - they will be
369 * enabled when the location monitors are properly configured and
370 * a callback has been attached.
371 * TSI148_LCSR_INTEO_LM0EO
372 * TSI148_LCSR_INTEO_LM1EO
373 * TSI148_LCSR_INTEO_LM2EO
374 * TSI148_LCSR_INTEO_LM3EO
377 /* Don't enable VME interrupts until we add a handler, else the board
378 * will respond to it and we don't want that unless it knows how to
379 * properly deal with it.
380 * TSI148_LCSR_INTEO_IRQ7EO
381 * TSI148_LCSR_INTEO_IRQ6EO
382 * TSI148_LCSR_INTEO_IRQ5EO
383 * TSI148_LCSR_INTEO_IRQ4EO
384 * TSI148_LCSR_INTEO_IRQ3EO
385 * TSI148_LCSR_INTEO_IRQ2EO
386 * TSI148_LCSR_INTEO_IRQ1EO
389 iowrite32be(tmp
, bridge
->base
+ TSI148_LCSR_INTEO
);
390 iowrite32be(tmp
, bridge
->base
+ TSI148_LCSR_INTEN
);
395 static void tsi148_irq_exit(struct pci_dev
*pdev
)
397 /* Turn off interrupts */
398 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
399 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
401 /* Clear all interrupts */
402 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
404 /* Detach interrupt handler */
405 free_irq(pdev
->irq
, pdev
);
409 * Check to see if an IACk has been received, return true (1) or false (0).
411 int tsi148_iack_received(void)
415 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
417 if (tmp
& TSI148_LCSR_VICR_IRQS
)
424 * Configure VME interrupt
426 void tsi148_irq_set(int level
, int state
, int sync
)
428 struct pci_dev
*pdev
;
431 /* We need to do the ordering differently for enabling and disabling */
433 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
434 tmp
&= ~TSI148_LCSR_INTEN_IRQEN
[level
- 1];
435 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
437 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
438 tmp
&= ~TSI148_LCSR_INTEO_IRQEO
[level
- 1];
439 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
442 pdev
= container_of(tsi148_bridge
->parent
,
443 struct pci_dev
, dev
);
445 synchronize_irq(pdev
->irq
);
448 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
449 tmp
|= TSI148_LCSR_INTEO_IRQEO
[level
- 1];
450 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
452 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
453 tmp
|= TSI148_LCSR_INTEN_IRQEN
[level
- 1];
454 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
459 * Generate a VME bus interrupt at the requested level & vector. Wait for
460 * interrupt to be acked.
462 int tsi148_irq_generate(int level
, int statid
)
466 mutex_lock(&(vme_int
));
468 /* Read VICR register */
469 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
472 tmp
= (tmp
& ~TSI148_LCSR_VICR_STID_M
) |
473 (statid
& TSI148_LCSR_VICR_STID_M
);
474 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
476 /* Assert VMEbus IRQ */
477 tmp
= tmp
| TSI148_LCSR_VICR_IRQL
[level
];
478 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
480 /* XXX Consider implementing a timeout? */
481 wait_event_interruptible(iack_queue
, tsi148_iack_received());
483 mutex_unlock(&(vme_int
));
489 * Find the first error in this address range
491 static struct vme_bus_error
*tsi148_find_error(vme_address_t aspace
,
492 unsigned long long address
, size_t count
)
494 struct list_head
*err_pos
;
495 struct vme_bus_error
*vme_err
, *valid
= NULL
;
496 unsigned long long bound
;
498 bound
= address
+ count
;
501 * XXX We are currently not looking at the address space when parsing
502 * for errors. This is because parsing the Address Modifier Codes
503 * is going to be quite resource intensive to do properly. We
504 * should be OK just looking at the addresses and this is certainly
505 * much better than what we had before.
508 /* Iterate through errors */
509 list_for_each(err_pos
, &(tsi148_bridge
->vme_errors
)) {
510 vme_err
= list_entry(err_pos
, struct vme_bus_error
, list
);
511 if((vme_err
->address
>= address
) && (vme_err
->address
< bound
)){
521 * Clear errors in the provided address range.
523 static void tsi148_clear_errors(vme_address_t aspace
,
524 unsigned long long address
, size_t count
)
526 struct list_head
*err_pos
, *temp
;
527 struct vme_bus_error
*vme_err
;
528 unsigned long long bound
;
530 bound
= address
+ count
;
533 * XXX We are currently not looking at the address space when parsing
534 * for errors. This is because parsing the Address Modifier Codes
535 * is going to be quite resource intensive to do properly. We
536 * should be OK just looking at the addresses and this is certainly
537 * much better than what we had before.
540 /* Iterate through errors */
541 list_for_each_safe(err_pos
, temp
, &(tsi148_bridge
->vme_errors
)) {
542 vme_err
= list_entry(err_pos
, struct vme_bus_error
, list
);
544 if((vme_err
->address
>= address
) && (vme_err
->address
< bound
)){
552 * Initialize a slave window with the requested attributes.
554 int tsi148_slave_set(struct vme_slave_resource
*image
, int enabled
,
555 unsigned long long vme_base
, unsigned long long size
,
556 dma_addr_t pci_base
, vme_address_t aspace
, vme_cycle_t cycle
)
558 unsigned int i
, addr
= 0, granularity
= 0;
559 unsigned int temp_ctl
= 0;
560 unsigned int vme_base_low
, vme_base_high
;
561 unsigned int vme_bound_low
, vme_bound_high
;
562 unsigned int pci_offset_low
, pci_offset_high
;
563 unsigned long long vme_bound
, pci_offset
;
566 printk("Set slave image %d to:\n", image
->number
);
567 printk("\tEnabled: %s\n", (enabled
== 1)? "yes" : "no");
568 printk("\tVME Base:0x%llx\n", vme_base
);
569 printk("\tWindow Size:0x%llx\n", size
);
570 printk("\tPCI Base:0x%lx\n", (unsigned long)pci_base
);
571 printk("\tAddress Space:0x%x\n", aspace
);
572 printk("\tTransfer Cycle Properties:0x%x\n", cycle
);
580 addr
|= TSI148_LCSR_ITAT_AS_A16
;
583 granularity
= 0x1000;
584 addr
|= TSI148_LCSR_ITAT_AS_A24
;
587 granularity
= 0x10000;
588 addr
|= TSI148_LCSR_ITAT_AS_A32
;
591 granularity
= 0x10000;
592 addr
|= TSI148_LCSR_ITAT_AS_A64
;
600 printk("Invalid address space\n");
605 /* Convert 64-bit variables to 2x 32-bit variables */
606 reg_split(vme_base
, &vme_base_high
, &vme_base_low
);
609 * Bound address is a valid address for the window, adjust
612 vme_bound
= vme_base
+ size
- granularity
;
613 reg_split(vme_bound
, &vme_bound_high
, &vme_bound_low
);
614 pci_offset
= (unsigned long long)pci_base
- vme_base
;
615 reg_split(pci_offset
, &pci_offset_high
, &pci_offset_low
);
617 if (vme_base_low
& (granularity
- 1)) {
618 printk("Invalid VME base alignment\n");
621 if (vme_bound_low
& (granularity
- 1)) {
622 printk("Invalid VME bound alignment\n");
625 if (pci_offset_low
& (granularity
- 1)) {
626 printk("Invalid PCI Offset alignment\n");
631 printk("\tVME Bound:0x%llx\n", vme_bound
);
632 printk("\tPCI Offset:0x%llx\n", pci_offset
);
635 /* Disable while we are mucking around */
636 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
637 TSI148_LCSR_OFFSET_ITAT
);
638 temp_ctl
&= ~TSI148_LCSR_ITAT_EN
;
639 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
640 TSI148_LCSR_OFFSET_ITAT
);
643 iowrite32be(vme_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
644 TSI148_LCSR_OFFSET_ITSAU
);
645 iowrite32be(vme_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
646 TSI148_LCSR_OFFSET_ITSAL
);
647 iowrite32be(vme_bound_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
648 TSI148_LCSR_OFFSET_ITEAU
);
649 iowrite32be(vme_bound_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
650 TSI148_LCSR_OFFSET_ITEAL
);
651 iowrite32be(pci_offset_high
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
652 TSI148_LCSR_OFFSET_ITOFU
);
653 iowrite32be(pci_offset_low
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
654 TSI148_LCSR_OFFSET_ITOFL
);
656 /* XXX Prefetch stuff currently unsupported */
659 for (x
= 0; x
< 4; x
++) {
660 if ((64 << x
) >= vmeIn
->prefetchSize
) {
666 temp_ctl
|= (x
<< 16);
668 if (vmeIn
->prefetchThreshold
)
669 if (vmeIn
->prefetchThreshold
)
673 /* Setup 2eSST speeds */
674 temp_ctl
&= ~TSI148_LCSR_ITAT_2eSSTM_M
;
675 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
677 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_160
;
680 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_267
;
683 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTM_320
;
687 /* Setup cycle types */
688 temp_ctl
&= ~(0x1F << 7);
690 temp_ctl
|= TSI148_LCSR_ITAT_BLT
;
691 if (cycle
& VME_MBLT
)
692 temp_ctl
|= TSI148_LCSR_ITAT_MBLT
;
693 if (cycle
& VME_2eVME
)
694 temp_ctl
|= TSI148_LCSR_ITAT_2eVME
;
695 if (cycle
& VME_2eSST
)
696 temp_ctl
|= TSI148_LCSR_ITAT_2eSST
;
697 if (cycle
& VME_2eSSTB
)
698 temp_ctl
|= TSI148_LCSR_ITAT_2eSSTB
;
700 /* Setup address space */
701 temp_ctl
&= ~TSI148_LCSR_ITAT_AS_M
;
705 if (cycle
& VME_SUPER
)
706 temp_ctl
|= TSI148_LCSR_ITAT_SUPR
;
707 if (cycle
& VME_USER
)
708 temp_ctl
|= TSI148_LCSR_ITAT_NPRIV
;
709 if (cycle
& VME_PROG
)
710 temp_ctl
|= TSI148_LCSR_ITAT_PGM
;
711 if (cycle
& VME_DATA
)
712 temp_ctl
|= TSI148_LCSR_ITAT_DATA
;
714 /* Write ctl reg without enable */
715 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
716 TSI148_LCSR_OFFSET_ITAT
);
719 temp_ctl
|= TSI148_LCSR_ITAT_EN
;
721 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
722 TSI148_LCSR_OFFSET_ITAT
);
728 * Get slave window configuration.
730 * XXX Prefetch currently unsupported.
732 int tsi148_slave_get(struct vme_slave_resource
*image
, int *enabled
,
733 unsigned long long *vme_base
, unsigned long long *size
,
734 dma_addr_t
*pci_base
, vme_address_t
*aspace
, vme_cycle_t
*cycle
)
736 unsigned int i
, granularity
= 0, ctl
= 0;
737 unsigned int vme_base_low
, vme_base_high
;
738 unsigned int vme_bound_low
, vme_bound_high
;
739 unsigned int pci_offset_low
, pci_offset_high
;
740 unsigned long long vme_bound
, pci_offset
;
746 ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
747 TSI148_LCSR_OFFSET_ITAT
);
749 vme_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
750 TSI148_LCSR_OFFSET_ITSAU
);
751 vme_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
752 TSI148_LCSR_OFFSET_ITSAL
);
753 vme_bound_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
754 TSI148_LCSR_OFFSET_ITEAU
);
755 vme_bound_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
756 TSI148_LCSR_OFFSET_ITEAL
);
757 pci_offset_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
758 TSI148_LCSR_OFFSET_ITOFU
);
759 pci_offset_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
760 TSI148_LCSR_OFFSET_ITOFL
);
762 /* Convert 64-bit variables to 2x 32-bit variables */
763 reg_join(vme_base_high
, vme_base_low
, vme_base
);
764 reg_join(vme_bound_high
, vme_bound_low
, &vme_bound
);
765 reg_join(pci_offset_high
, pci_offset_low
, &pci_offset
);
767 *pci_base
= (dma_addr_t
)vme_base
+ pci_offset
;
773 if (ctl
& TSI148_LCSR_ITAT_EN
)
776 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A16
) {
780 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A24
) {
781 granularity
= 0x1000;
784 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A32
) {
785 granularity
= 0x10000;
788 if ((ctl
& TSI148_LCSR_ITAT_AS_M
) == TSI148_LCSR_ITAT_AS_A64
) {
789 granularity
= 0x10000;
793 /* Need granularity before we set the size */
794 *size
= (unsigned long long)((vme_bound
- *vme_base
) + granularity
);
797 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_160
)
798 *cycle
|= VME_2eSST160
;
799 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_267
)
800 *cycle
|= VME_2eSST267
;
801 if ((ctl
& TSI148_LCSR_ITAT_2eSSTM_M
) == TSI148_LCSR_ITAT_2eSSTM_320
)
802 *cycle
|= VME_2eSST320
;
804 if (ctl
& TSI148_LCSR_ITAT_BLT
)
806 if (ctl
& TSI148_LCSR_ITAT_MBLT
)
808 if (ctl
& TSI148_LCSR_ITAT_2eVME
)
810 if (ctl
& TSI148_LCSR_ITAT_2eSST
)
812 if (ctl
& TSI148_LCSR_ITAT_2eSSTB
)
813 *cycle
|= VME_2eSSTB
;
815 if (ctl
& TSI148_LCSR_ITAT_SUPR
)
817 if (ctl
& TSI148_LCSR_ITAT_NPRIV
)
819 if (ctl
& TSI148_LCSR_ITAT_PGM
)
821 if (ctl
& TSI148_LCSR_ITAT_DATA
)
828 * Allocate and map PCI Resource
830 static int tsi148_alloc_resource(struct vme_master_resource
*image
,
831 unsigned long long size
)
833 unsigned long long existing_size
;
835 struct pci_dev
*pdev
;
837 /* Find pci_dev container of dev */
838 if (tsi148_bridge
->parent
== NULL
) {
839 printk("Dev entry NULL\n");
842 pdev
= container_of(tsi148_bridge
->parent
, struct pci_dev
, dev
);
844 existing_size
= (unsigned long long)(image
->pci_resource
.end
-
845 image
->pci_resource
.start
);
847 /* If the existing size is OK, return */
848 if ((size
!= 0) && (existing_size
== (size
- 1)))
851 if (existing_size
!= 0) {
852 iounmap(image
->kern_base
);
853 image
->kern_base
= NULL
;
854 if (image
->pci_resource
.name
!= NULL
)
855 kfree(image
->pci_resource
.name
);
856 release_resource(&(image
->pci_resource
));
857 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
860 /* Exit here if size is zero */
865 if (image
->pci_resource
.name
== NULL
) {
866 image
->pci_resource
.name
= kmalloc(VMENAMSIZ
+3, GFP_KERNEL
);
867 if (image
->pci_resource
.name
== NULL
) {
868 printk(KERN_ERR
"Unable to allocate memory for resource"
875 sprintf((char *)image
->pci_resource
.name
, "%s.%d", tsi148_bridge
->name
,
878 image
->pci_resource
.start
= 0;
879 image
->pci_resource
.end
= (unsigned long)size
;
880 image
->pci_resource
.flags
= IORESOURCE_MEM
;
882 retval
= pci_bus_alloc_resource(pdev
->bus
,
883 &(image
->pci_resource
), size
, size
, PCIBIOS_MIN_MEM
,
886 printk(KERN_ERR
"Failed to allocate mem resource for "
887 "window %d size 0x%lx start 0x%lx\n",
888 image
->number
, (unsigned long)size
,
889 (unsigned long)image
->pci_resource
.start
);
893 image
->kern_base
= ioremap_nocache(
894 image
->pci_resource
.start
, size
);
895 if (image
->kern_base
== NULL
) {
896 printk(KERN_ERR
"Failed to remap resource\n");
903 iounmap(image
->kern_base
);
904 image
->kern_base
= NULL
;
906 release_resource(&(image
->pci_resource
));
908 kfree(image
->pci_resource
.name
);
909 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
915 * Free and unmap PCI Resource
917 static void tsi148_free_resource(struct vme_master_resource
*image
)
919 iounmap(image
->kern_base
);
920 image
->kern_base
= NULL
;
921 release_resource(&(image
->pci_resource
));
922 kfree(image
->pci_resource
.name
);
923 memset(&(image
->pci_resource
), 0, sizeof(struct resource
));
927 * Set the attributes of an outbound window.
929 int tsi148_master_set( struct vme_master_resource
*image
, int enabled
,
930 unsigned long long vme_base
, unsigned long long size
,
931 vme_address_t aspace
, vme_cycle_t cycle
, vme_width_t dwidth
)
935 unsigned int temp_ctl
= 0;
936 unsigned int pci_base_low
, pci_base_high
;
937 unsigned int pci_bound_low
, pci_bound_high
;
938 unsigned int vme_offset_low
, vme_offset_high
;
939 unsigned long long pci_bound
, vme_offset
, pci_base
;
941 /* Verify input data */
942 if (vme_base
& 0xFFFF) {
943 printk(KERN_ERR
"Invalid VME Window alignment\n");
948 if ((size
== 0) && (enabled
!= 0)) {
949 printk(KERN_ERR
"Size must be non-zero for enabled windows\n");
954 spin_lock(&(image
->lock
));
956 /* Let's allocate the resource here rather than further up the stack as
957 * it avoids pushing loads of bus dependant stuff up the stack. If size
958 * is zero, any existing resource will be freed.
960 retval
= tsi148_alloc_resource(image
, size
);
962 spin_unlock(&(image
->lock
));
963 printk(KERN_ERR
"Unable to allocate memory for "
973 pci_base
= (unsigned long long)image
->pci_resource
.start
;
976 * Bound address is a valid address for the window, adjust
977 * according to window granularity.
979 pci_bound
= pci_base
+ (size
- 0x10000);
980 vme_offset
= vme_base
- pci_base
;
983 /* Convert 64-bit variables to 2x 32-bit variables */
984 reg_split(pci_base
, &pci_base_high
, &pci_base_low
);
985 reg_split(pci_bound
, &pci_bound_high
, &pci_bound_low
);
986 reg_split(vme_offset
, &vme_offset_high
, &vme_offset_low
);
988 if (pci_base_low
& 0xFFFF) {
989 spin_unlock(&(image
->lock
));
990 printk(KERN_ERR
"Invalid PCI base alignment\n");
994 if (pci_bound_low
& 0xFFFF) {
995 spin_unlock(&(image
->lock
));
996 printk(KERN_ERR
"Invalid PCI bound alignment\n");
1000 if (vme_offset_low
& 0xFFFF) {
1001 spin_unlock(&(image
->lock
));
1002 printk(KERN_ERR
"Invalid VME Offset alignment\n");
1009 /* Disable while we are mucking around */
1010 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1011 TSI148_LCSR_OFFSET_OTAT
);
1012 temp_ctl
&= ~TSI148_LCSR_OTAT_EN
;
1013 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1014 TSI148_LCSR_OFFSET_OTAT
);
1016 /* XXX Prefetch stuff currently unsupported */
1018 if (vmeOut
->prefetchEnable
) {
1019 temp_ctl
|= 0x40000;
1020 for (x
= 0; x
< 4; x
++) {
1021 if ((2 << x
) >= vmeOut
->prefetchSize
)
1026 temp_ctl
|= (x
<< 16);
1030 /* Setup 2eSST speeds */
1031 temp_ctl
&= ~TSI148_LCSR_OTAT_2eSSTM_M
;
1032 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1034 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_160
;
1037 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_267
;
1040 temp_ctl
|= TSI148_LCSR_OTAT_2eSSTM_320
;
1044 /* Setup cycle types */
1045 if (cycle
& VME_BLT
) {
1046 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1047 temp_ctl
|= TSI148_LCSR_OTAT_TM_BLT
;
1049 if (cycle
& VME_MBLT
) {
1050 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1051 temp_ctl
|= TSI148_LCSR_OTAT_TM_MBLT
;
1053 if (cycle
& VME_2eVME
) {
1054 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1055 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eVME
;
1057 if (cycle
& VME_2eSST
) {
1058 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1059 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eSST
;
1061 if (cycle
& VME_2eSSTB
) {
1062 printk(KERN_WARNING
"Currently not setting Broadcast Select "
1064 temp_ctl
&= ~TSI148_LCSR_OTAT_TM_M
;
1065 temp_ctl
|= TSI148_LCSR_OTAT_TM_2eSSTB
;
1068 /* Setup data width */
1069 temp_ctl
&= ~TSI148_LCSR_OTAT_DBW_M
;
1072 temp_ctl
|= TSI148_LCSR_OTAT_DBW_16
;
1075 temp_ctl
|= TSI148_LCSR_OTAT_DBW_32
;
1078 spin_unlock(&(image
->lock
));
1079 printk(KERN_ERR
"Invalid data width\n");
1084 /* Setup address space */
1085 temp_ctl
&= ~TSI148_LCSR_OTAT_AMODE_M
;
1088 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A16
;
1091 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A24
;
1094 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A32
;
1097 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_A64
;
1100 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_CRCSR
;
1103 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER1
;
1106 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER2
;
1109 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER3
;
1112 temp_ctl
|= TSI148_LCSR_OTAT_AMODE_USER4
;
1115 spin_unlock(&(image
->lock
));
1116 printk(KERN_ERR
"Invalid address space\n");
1122 temp_ctl
&= ~(3<<4);
1123 if (cycle
& VME_SUPER
)
1124 temp_ctl
|= TSI148_LCSR_OTAT_SUP
;
1125 if (cycle
& VME_PROG
)
1126 temp_ctl
|= TSI148_LCSR_OTAT_PGM
;
1129 iowrite32be(pci_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1130 TSI148_LCSR_OFFSET_OTSAU
);
1131 iowrite32be(pci_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1132 TSI148_LCSR_OFFSET_OTSAL
);
1133 iowrite32be(pci_bound_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1134 TSI148_LCSR_OFFSET_OTEAU
);
1135 iowrite32be(pci_bound_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1136 TSI148_LCSR_OFFSET_OTEAL
);
1137 iowrite32be(vme_offset_high
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1138 TSI148_LCSR_OFFSET_OTOFU
);
1139 iowrite32be(vme_offset_low
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1140 TSI148_LCSR_OFFSET_OTOFL
);
1142 /* XXX We need to deal with OTBS */
1144 iowrite32be(vmeOut
->bcastSelect2esst
, tsi148_bridge
->base
+
1145 TSI148_LCSR_OT
[i
] + TSI148_LCSR_OFFSET_OTBS
);
1148 /* Write ctl reg without enable */
1149 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1150 TSI148_LCSR_OFFSET_OTAT
);
1153 temp_ctl
|= TSI148_LCSR_OTAT_EN
;
1155 iowrite32be(temp_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1156 TSI148_LCSR_OFFSET_OTAT
);
1158 spin_unlock(&(image
->lock
));
1164 tsi148_free_resource(image
);
1172 * Set the attributes of an outbound window.
1174 * XXX Not parsing prefetch information.
1176 int __tsi148_master_get( struct vme_master_resource
*image
, int *enabled
,
1177 unsigned long long *vme_base
, unsigned long long *size
,
1178 vme_address_t
*aspace
, vme_cycle_t
*cycle
, vme_width_t
*dwidth
)
1180 unsigned int i
, ctl
;
1181 unsigned int pci_base_low
, pci_base_high
;
1182 unsigned int pci_bound_low
, pci_bound_high
;
1183 unsigned int vme_offset_low
, vme_offset_high
;
1185 unsigned long long pci_base
, pci_bound
, vme_offset
;
1189 ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1190 TSI148_LCSR_OFFSET_OTAT
);
1192 pci_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1193 TSI148_LCSR_OFFSET_OTSAU
);
1194 pci_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1195 TSI148_LCSR_OFFSET_OTSAL
);
1196 pci_bound_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1197 TSI148_LCSR_OFFSET_OTEAU
);
1198 pci_bound_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1199 TSI148_LCSR_OFFSET_OTEAL
);
1200 vme_offset_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1201 TSI148_LCSR_OFFSET_OTOFU
);
1202 vme_offset_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1203 TSI148_LCSR_OFFSET_OTOFL
);
1205 /* Convert 64-bit variables to 2x 32-bit variables */
1206 reg_join(pci_base_high
, pci_base_low
, &pci_base
);
1207 reg_join(pci_bound_high
, pci_bound_low
, &pci_bound
);
1208 reg_join(vme_offset_high
, vme_offset_low
, &vme_offset
);
1210 *vme_base
= pci_base
+ vme_offset
;
1211 *size
= (unsigned long long)(pci_bound
- pci_base
) + 0x10000;
1218 if (ctl
& TSI148_LCSR_OTAT_EN
)
1221 /* Setup address space */
1222 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A16
)
1224 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A24
)
1226 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A32
)
1228 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_A64
)
1230 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_CRCSR
)
1231 *aspace
|= VME_CRCSR
;
1232 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER1
)
1233 *aspace
|= VME_USER1
;
1234 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER2
)
1235 *aspace
|= VME_USER2
;
1236 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER3
)
1237 *aspace
|= VME_USER3
;
1238 if ((ctl
& TSI148_LCSR_OTAT_AMODE_M
) == TSI148_LCSR_OTAT_AMODE_USER4
)
1239 *aspace
|= VME_USER4
;
1241 /* Setup 2eSST speeds */
1242 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_160
)
1243 *cycle
|= VME_2eSST160
;
1244 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_267
)
1245 *cycle
|= VME_2eSST267
;
1246 if ((ctl
& TSI148_LCSR_OTAT_2eSSTM_M
) == TSI148_LCSR_OTAT_2eSSTM_320
)
1247 *cycle
|= VME_2eSST320
;
1249 /* Setup cycle types */
1250 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_SCT
)
1252 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_BLT
)
1254 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_MBLT
)
1256 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eVME
)
1257 *cycle
|= VME_2eVME
;
1258 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eSST
)
1259 *cycle
|= VME_2eSST
;
1260 if ((ctl
& TSI148_LCSR_OTAT_TM_M
) == TSI148_LCSR_OTAT_TM_2eSSTB
)
1261 *cycle
|= VME_2eSSTB
;
1263 if (ctl
& TSI148_LCSR_OTAT_SUP
)
1264 *cycle
|= VME_SUPER
;
1268 if (ctl
& TSI148_LCSR_OTAT_PGM
)
1273 /* Setup data width */
1274 if ((ctl
& TSI148_LCSR_OTAT_DBW_M
) == TSI148_LCSR_OTAT_DBW_16
)
1276 if ((ctl
& TSI148_LCSR_OTAT_DBW_M
) == TSI148_LCSR_OTAT_DBW_32
)
1283 int tsi148_master_get( struct vme_master_resource
*image
, int *enabled
,
1284 unsigned long long *vme_base
, unsigned long long *size
,
1285 vme_address_t
*aspace
, vme_cycle_t
*cycle
, vme_width_t
*dwidth
)
1289 spin_lock(&(image
->lock
));
1291 retval
= __tsi148_master_get(image
, enabled
, vme_base
, size
, aspace
,
1294 spin_unlock(&(image
->lock
));
1299 ssize_t
tsi148_master_read(struct vme_master_resource
*image
, void *buf
,
1300 size_t count
, loff_t offset
)
1302 int retval
, enabled
;
1303 unsigned long long vme_base
, size
;
1304 vme_address_t aspace
;
1307 struct vme_bus_error
*vme_err
= NULL
;
1309 spin_lock(&(image
->lock
));
1311 memcpy_fromio(buf
, image
->kern_base
+ offset
, (unsigned int)count
);
1317 __tsi148_master_get(image
, &enabled
, &vme_base
, &size
, &aspace
, &cycle
,
1320 vme_err
= tsi148_find_error(aspace
, vme_base
+ offset
, count
);
1321 if(vme_err
!= NULL
) {
1322 dev_err(image
->parent
->parent
, "First VME read error detected "
1323 "an at address 0x%llx\n", vme_err
->address
);
1324 retval
= vme_err
->address
- (vme_base
+ offset
);
1325 /* Clear down save errors in this address range */
1326 tsi148_clear_errors(aspace
, vme_base
+ offset
, count
);
1330 spin_unlock(&(image
->lock
));
1336 /* XXX We need to change vme_master_resource->mtx to a spinlock so that read
1337 * and write functions can be used in an interrupt context
1339 ssize_t
tsi148_master_write(struct vme_master_resource
*image
, void *buf
,
1340 size_t count
, loff_t offset
)
1342 int retval
= 0, enabled
;
1343 unsigned long long vme_base
, size
;
1344 vme_address_t aspace
;
1348 struct vme_bus_error
*vme_err
= NULL
;
1350 spin_lock(&(image
->lock
));
1352 memcpy_toio(image
->kern_base
+ offset
, buf
, (unsigned int)count
);
1356 * Writes are posted. We need to do a read on the VME bus to flush out
1357 * all of the writes before we check for errors. We can't guarentee
1358 * that reading the data we have just written is safe. It is believed
1359 * that there isn't any read, write re-ordering, so we can read any
1360 * location in VME space, so lets read the Device ID from the tsi148's
1361 * own registers as mapped into CR/CSR space.
1363 * We check for saved errors in the written address range/space.
1370 * Get window info first, to maximise the time that the buffers may
1371 * fluch on their own
1373 __tsi148_master_get(image
, &enabled
, &vme_base
, &size
, &aspace
, &cycle
,
1376 ioread16(flush_image
->kern_base
+ 0x7F000);
1378 vme_err
= tsi148_find_error(aspace
, vme_base
+ offset
, count
);
1379 if(vme_err
!= NULL
) {
1380 printk("First VME write error detected an at address 0x%llx\n",
1382 retval
= vme_err
->address
- (vme_base
+ offset
);
1383 /* Clear down save errors in this address range */
1384 tsi148_clear_errors(aspace
, vme_base
+ offset
, count
);
1388 spin_unlock(&(image
->lock
));
1394 * Perform an RMW cycle on the VME bus.
1396 * Requires a previously configured master window, returns final value.
1398 unsigned int tsi148_master_rmw(struct vme_master_resource
*image
,
1399 unsigned int mask
, unsigned int compare
, unsigned int swap
,
1402 unsigned long long pci_addr
;
1403 unsigned int pci_addr_high
, pci_addr_low
;
1408 /* Find the PCI address that maps to the desired VME address */
1411 /* Locking as we can only do one of these at a time */
1412 mutex_lock(&(vme_rmw
));
1415 spin_lock(&(image
->lock
));
1417 pci_addr_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1418 TSI148_LCSR_OFFSET_OTSAU
);
1419 pci_addr_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
1420 TSI148_LCSR_OFFSET_OTSAL
);
1422 reg_join(pci_addr_high
, pci_addr_low
, &pci_addr
);
1423 reg_split(pci_addr
+ offset
, &pci_addr_high
, &pci_addr_low
);
1425 /* Configure registers */
1426 iowrite32be(mask
, tsi148_bridge
->base
+ TSI148_LCSR_RMWEN
);
1427 iowrite32be(compare
, tsi148_bridge
->base
+ TSI148_LCSR_RMWC
);
1428 iowrite32be(swap
, tsi148_bridge
->base
+ TSI148_LCSR_RMWS
);
1429 iowrite32be(pci_addr_high
, tsi148_bridge
->base
+ TSI148_LCSR_RMWAU
);
1430 iowrite32be(pci_addr_low
, tsi148_bridge
->base
+ TSI148_LCSR_RMWAL
);
1433 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1434 tmp
|= TSI148_LCSR_VMCTRL_RMWEN
;
1435 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1437 /* Kick process off with a read to the required address. */
1438 result
= ioread32be(image
->kern_base
+ offset
);
1441 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1442 tmp
&= ~TSI148_LCSR_VMCTRL_RMWEN
;
1443 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
1445 spin_unlock(&(image
->lock
));
1447 mutex_unlock(&(vme_rmw
));
1452 static int tsi148_dma_set_vme_src_attributes (u32
*attr
, vme_address_t aspace
,
1453 vme_cycle_t cycle
, vme_width_t dwidth
)
1455 /* Setup 2eSST speeds */
1456 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1458 *attr
|= TSI148_LCSR_DSAT_2eSSTM_160
;
1461 *attr
|= TSI148_LCSR_DSAT_2eSSTM_267
;
1464 *attr
|= TSI148_LCSR_DSAT_2eSSTM_320
;
1468 /* Setup cycle types */
1469 if (cycle
& VME_SCT
) {
1470 *attr
|= TSI148_LCSR_DSAT_TM_SCT
;
1472 if (cycle
& VME_BLT
) {
1473 *attr
|= TSI148_LCSR_DSAT_TM_BLT
;
1475 if (cycle
& VME_MBLT
) {
1476 *attr
|= TSI148_LCSR_DSAT_TM_MBLT
;
1478 if (cycle
& VME_2eVME
) {
1479 *attr
|= TSI148_LCSR_DSAT_TM_2eVME
;
1481 if (cycle
& VME_2eSST
) {
1482 *attr
|= TSI148_LCSR_DSAT_TM_2eSST
;
1484 if (cycle
& VME_2eSSTB
) {
1485 printk("Currently not setting Broadcast Select Registers\n");
1486 *attr
|= TSI148_LCSR_DSAT_TM_2eSSTB
;
1489 /* Setup data width */
1492 *attr
|= TSI148_LCSR_DSAT_DBW_16
;
1495 *attr
|= TSI148_LCSR_DSAT_DBW_32
;
1498 printk("Invalid data width\n");
1502 /* Setup address space */
1505 *attr
|= TSI148_LCSR_DSAT_AMODE_A16
;
1508 *attr
|= TSI148_LCSR_DSAT_AMODE_A24
;
1511 *attr
|= TSI148_LCSR_DSAT_AMODE_A32
;
1514 *attr
|= TSI148_LCSR_DSAT_AMODE_A64
;
1517 *attr
|= TSI148_LCSR_DSAT_AMODE_CRCSR
;
1520 *attr
|= TSI148_LCSR_DSAT_AMODE_USER1
;
1523 *attr
|= TSI148_LCSR_DSAT_AMODE_USER2
;
1526 *attr
|= TSI148_LCSR_DSAT_AMODE_USER3
;
1529 *attr
|= TSI148_LCSR_DSAT_AMODE_USER4
;
1532 printk("Invalid address space\n");
1537 if (cycle
& VME_SUPER
)
1538 *attr
|= TSI148_LCSR_DSAT_SUP
;
1539 if (cycle
& VME_PROG
)
1540 *attr
|= TSI148_LCSR_DSAT_PGM
;
1545 static int tsi148_dma_set_vme_dest_attributes(u32
*attr
, vme_address_t aspace
,
1546 vme_cycle_t cycle
, vme_width_t dwidth
)
1548 /* Setup 2eSST speeds */
1549 switch (cycle
& (VME_2eSST160
| VME_2eSST267
| VME_2eSST320
)) {
1551 *attr
|= TSI148_LCSR_DDAT_2eSSTM_160
;
1554 *attr
|= TSI148_LCSR_DDAT_2eSSTM_267
;
1557 *attr
|= TSI148_LCSR_DDAT_2eSSTM_320
;
1561 /* Setup cycle types */
1562 if (cycle
& VME_SCT
) {
1563 *attr
|= TSI148_LCSR_DDAT_TM_SCT
;
1565 if (cycle
& VME_BLT
) {
1566 *attr
|= TSI148_LCSR_DDAT_TM_BLT
;
1568 if (cycle
& VME_MBLT
) {
1569 *attr
|= TSI148_LCSR_DDAT_TM_MBLT
;
1571 if (cycle
& VME_2eVME
) {
1572 *attr
|= TSI148_LCSR_DDAT_TM_2eVME
;
1574 if (cycle
& VME_2eSST
) {
1575 *attr
|= TSI148_LCSR_DDAT_TM_2eSST
;
1577 if (cycle
& VME_2eSSTB
) {
1578 printk("Currently not setting Broadcast Select Registers\n");
1579 *attr
|= TSI148_LCSR_DDAT_TM_2eSSTB
;
1582 /* Setup data width */
1585 *attr
|= TSI148_LCSR_DDAT_DBW_16
;
1588 *attr
|= TSI148_LCSR_DDAT_DBW_32
;
1591 printk("Invalid data width\n");
1595 /* Setup address space */
1598 *attr
|= TSI148_LCSR_DDAT_AMODE_A16
;
1601 *attr
|= TSI148_LCSR_DDAT_AMODE_A24
;
1604 *attr
|= TSI148_LCSR_DDAT_AMODE_A32
;
1607 *attr
|= TSI148_LCSR_DDAT_AMODE_A64
;
1610 *attr
|= TSI148_LCSR_DDAT_AMODE_CRCSR
;
1613 *attr
|= TSI148_LCSR_DDAT_AMODE_USER1
;
1616 *attr
|= TSI148_LCSR_DDAT_AMODE_USER2
;
1619 *attr
|= TSI148_LCSR_DDAT_AMODE_USER3
;
1622 *attr
|= TSI148_LCSR_DDAT_AMODE_USER4
;
1625 printk("Invalid address space\n");
1630 if (cycle
& VME_SUPER
)
1631 *attr
|= TSI148_LCSR_DDAT_SUP
;
1632 if (cycle
& VME_PROG
)
1633 *attr
|= TSI148_LCSR_DDAT_PGM
;
1639 * Add a link list descriptor to the list
1641 * XXX Need to handle 2eSST Broadcast select bits
1643 int tsi148_dma_list_add (struct vme_dma_list
*list
, struct vme_dma_attr
*src
,
1644 struct vme_dma_attr
*dest
, size_t count
)
1646 struct tsi148_dma_entry
*entry
, *prev
;
1647 u32 address_high
, address_low
;
1648 struct vme_dma_pattern
*pattern_attr
;
1649 struct vme_dma_pci
*pci_attr
;
1650 struct vme_dma_vme
*vme_attr
;
1651 dma_addr_t desc_ptr
;
1654 /* XXX descriptor must be aligned on 64-bit boundaries */
1655 entry
= (struct tsi148_dma_entry
*)kmalloc(
1656 sizeof(struct tsi148_dma_entry
), GFP_KERNEL
);
1657 if (entry
== NULL
) {
1658 printk("Failed to allocate memory for dma resource "
1664 /* Test descriptor alignment */
1665 if ((unsigned long)&(entry
->descriptor
) & 0x7) {
1666 printk("Descriptor not aligned to 8 byte boundary as "
1667 "required: %p\n", &(entry
->descriptor
));
1672 /* Given we are going to fill out the structure, we probably don't
1673 * need to zero it, but better safe than sorry for now.
1675 memset(&(entry
->descriptor
), 0, sizeof(struct tsi148_dma_descriptor
));
1677 /* Fill out source part */
1678 switch (src
->type
) {
1679 case VME_DMA_PATTERN
:
1680 pattern_attr
= (struct vme_dma_pattern
*)src
->private;
1682 entry
->descriptor
.dsal
= pattern_attr
->pattern
;
1683 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_PAT
;
1684 /* Default behaviour is 32 bit pattern */
1685 if (pattern_attr
->type
& VME_DMA_PATTERN_BYTE
) {
1686 entry
->descriptor
.dsat
|= TSI148_LCSR_DSAT_PSZ
;
1688 /* It seems that the default behaviour is to increment */
1689 if ((pattern_attr
->type
& VME_DMA_PATTERN_INCREMENT
) == 0) {
1690 entry
->descriptor
.dsat
|= TSI148_LCSR_DSAT_NIN
;
1694 pci_attr
= (struct vme_dma_pci
*)src
->private;
1696 reg_split((unsigned long long)pci_attr
->address
, &address_high
,
1698 entry
->descriptor
.dsau
= address_high
;
1699 entry
->descriptor
.dsal
= address_low
;
1700 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_PCI
;
1703 vme_attr
= (struct vme_dma_vme
*)src
->private;
1705 reg_split((unsigned long long)vme_attr
->address
, &address_high
,
1707 entry
->descriptor
.dsau
= address_high
;
1708 entry
->descriptor
.dsal
= address_low
;
1709 entry
->descriptor
.dsat
= TSI148_LCSR_DSAT_TYP_VME
;
1711 retval
= tsi148_dma_set_vme_src_attributes(
1712 &(entry
->descriptor
.dsat
), vme_attr
->aspace
,
1713 vme_attr
->cycle
, vme_attr
->dwidth
);
1718 printk("Invalid source type\n");
1724 /* Assume last link - this will be over-written by adding another */
1725 entry
->descriptor
.dnlau
= 0;
1726 entry
->descriptor
.dnlal
= TSI148_LCSR_DNLAL_LLA
;
1729 /* Fill out destination part */
1730 switch (dest
->type
) {
1732 pci_attr
= (struct vme_dma_pci
*)dest
->private;
1734 reg_split((unsigned long long)pci_attr
->address
, &address_high
,
1736 entry
->descriptor
.ddau
= address_high
;
1737 entry
->descriptor
.ddal
= address_low
;
1738 entry
->descriptor
.ddat
= TSI148_LCSR_DDAT_TYP_PCI
;
1741 vme_attr
= (struct vme_dma_vme
*)dest
->private;
1743 reg_split((unsigned long long)vme_attr
->address
, &address_high
,
1745 entry
->descriptor
.ddau
= address_high
;
1746 entry
->descriptor
.ddal
= address_low
;
1747 entry
->descriptor
.ddat
= TSI148_LCSR_DDAT_TYP_VME
;
1749 retval
= tsi148_dma_set_vme_dest_attributes(
1750 &(entry
->descriptor
.ddat
), vme_attr
->aspace
,
1751 vme_attr
->cycle
, vme_attr
->dwidth
);
1756 printk("Invalid destination type\n");
1762 /* Fill out count */
1763 entry
->descriptor
.dcnt
= (u32
)count
;
1766 list_add_tail(&(entry
->list
), &(list
->entries
));
1768 /* Fill out previous descriptors "Next Address" */
1769 if(entry
->list
.prev
!= &(list
->entries
)){
1770 prev
= list_entry(entry
->list
.prev
, struct tsi148_dma_entry
,
1772 /* We need the bus address for the pointer */
1773 desc_ptr
= virt_to_bus(&(entry
->descriptor
));
1774 reg_split(desc_ptr
, &(prev
->descriptor
.dnlau
),
1775 &(prev
->descriptor
.dnlal
));
1789 * Check to see if the provided DMA channel is busy.
1791 static int tsi148_dma_busy(int channel
)
1795 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_DMA
[channel
] +
1796 TSI148_LCSR_OFFSET_DSTA
);
1798 if (tmp
& TSI148_LCSR_DSTA_BSY
)
1806 * Execute a previously generated link list
1808 * XXX Need to provide control register configuration.
1810 int tsi148_dma_list_exec(struct vme_dma_list
*list
)
1812 struct vme_dma_resource
*ctrlr
;
1813 int channel
, retval
= 0;
1814 struct tsi148_dma_entry
*entry
;
1815 dma_addr_t bus_addr
;
1816 u32 bus_addr_high
, bus_addr_low
;
1817 u32 val
, dctlreg
= 0;
1822 ctrlr
= list
->parent
;
1824 mutex_lock(&(ctrlr
->mtx
));
1826 channel
= ctrlr
->number
;
1828 if (! list_empty(&(ctrlr
->running
))) {
1830 * XXX We have an active DMA transfer and currently haven't
1831 * sorted out the mechanism for "pending" DMA transfers.
1834 /* Need to add to pending here */
1835 mutex_unlock(&(ctrlr
->mtx
));
1838 list_add(&(list
->list
), &(ctrlr
->running
));
1841 /* XXX Still todo */
1842 for (x
= 0; x
< 8; x
++) { /* vme block size */
1843 if ((32 << x
) >= vmeDma
->maxVmeBlockSize
) {
1849 dctlreg
|= (x
<< 12);
1851 for (x
= 0; x
< 8; x
++) { /* pci block size */
1852 if ((32 << x
) >= vmeDma
->maxPciBlockSize
) {
1858 dctlreg
|= (x
<< 4);
1860 if (vmeDma
->vmeBackOffTimer
) {
1861 for (x
= 1; x
< 8; x
++) { /* vme timer */
1862 if ((1 << (x
- 1)) >= vmeDma
->vmeBackOffTimer
) {
1868 dctlreg
|= (x
<< 8);
1871 if (vmeDma
->pciBackOffTimer
) {
1872 for (x
= 1; x
< 8; x
++) { /* pci timer */
1873 if ((1 << (x
- 1)) >= vmeDma
->pciBackOffTimer
) {
1879 dctlreg
|= (x
<< 0);
1883 /* Get first bus address and write into registers */
1884 entry
= list_first_entry(&(list
->entries
), struct tsi148_dma_entry
,
1887 bus_addr
= virt_to_bus(&(entry
->descriptor
));
1889 mutex_unlock(&(ctrlr
->mtx
));
1891 reg_split(bus_addr
, &bus_addr_high
, &bus_addr_low
);
1893 iowrite32be(bus_addr_high
, tsi148_bridge
->base
+
1894 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DNLAU
);
1895 iowrite32be(bus_addr_low
, tsi148_bridge
->base
+
1896 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DNLAL
);
1898 /* Start the operation */
1899 iowrite32be(dctlreg
| TSI148_LCSR_DCTL_DGO
, tsi148_bridge
->base
+
1900 TSI148_LCSR_DMA
[channel
] + TSI148_LCSR_OFFSET_DCTL
);
1902 wait_event_interruptible(dma_queue
[channel
], tsi148_dma_busy(channel
));
1904 * Read status register, this register is valid until we kick off a
1907 val
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_DMA
[channel
] +
1908 TSI148_LCSR_OFFSET_DSTA
);
1910 if (val
& TSI148_LCSR_DSTA_VBE
) {
1911 printk(KERN_ERR
"tsi148: DMA Error. DSTA=%08X\n", val
);
1915 /* Remove list from running list */
1916 mutex_lock(&(ctrlr
->mtx
));
1917 list_del(&(list
->list
));
1918 mutex_unlock(&(ctrlr
->mtx
));
1924 * Clean up a previously generated link list
1926 * We have a separate function, don't assume that the chain can't be reused.
1928 int tsi148_dma_list_empty(struct vme_dma_list
*list
)
1930 struct list_head
*pos
, *temp
;
1931 struct tsi148_dma_entry
*entry
;
1933 /* detach and free each entry */
1934 list_for_each_safe(pos
, temp
, &(list
->entries
)) {
1936 entry
= list_entry(pos
, struct tsi148_dma_entry
, list
);
1944 * All 4 location monitors reside at the same base - this is therefore a
1945 * system wide configuration.
1947 * This does not enable the LM monitor - that should be done when the first
1948 * callback is attached and disabled when the last callback is removed.
1950 int tsi148_lm_set(struct vme_lm_resource
*lm
, unsigned long long lm_base
,
1951 vme_address_t aspace
, vme_cycle_t cycle
)
1953 u32 lm_base_high
, lm_base_low
, lm_ctl
= 0;
1956 mutex_lock(&(lm
->mtx
));
1958 /* If we already have a callback attached, we can't move it! */
1959 for (i
= 0; i
< lm
->monitors
; i
++) {
1960 if(lm_callback
[i
] != NULL
) {
1961 mutex_unlock(&(lm
->mtx
));
1962 printk("Location monitor callback attached, can't "
1970 lm_ctl
|= TSI148_LCSR_LMAT_AS_A16
;
1973 lm_ctl
|= TSI148_LCSR_LMAT_AS_A24
;
1976 lm_ctl
|= TSI148_LCSR_LMAT_AS_A32
;
1979 lm_ctl
|= TSI148_LCSR_LMAT_AS_A64
;
1982 mutex_unlock(&(lm
->mtx
));
1983 printk("Invalid address space\n");
1988 if (cycle
& VME_SUPER
)
1989 lm_ctl
|= TSI148_LCSR_LMAT_SUPR
;
1990 if (cycle
& VME_USER
)
1991 lm_ctl
|= TSI148_LCSR_LMAT_NPRIV
;
1992 if (cycle
& VME_PROG
)
1993 lm_ctl
|= TSI148_LCSR_LMAT_PGM
;
1994 if (cycle
& VME_DATA
)
1995 lm_ctl
|= TSI148_LCSR_LMAT_DATA
;
1997 reg_split(lm_base
, &lm_base_high
, &lm_base_low
);
1999 iowrite32be(lm_base_high
, tsi148_bridge
->base
+ TSI148_LCSR_LMBAU
);
2000 iowrite32be(lm_base_low
, tsi148_bridge
->base
+ TSI148_LCSR_LMBAL
);
2001 iowrite32be(lm_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2003 mutex_unlock(&(lm
->mtx
));
2008 /* Get configuration of the callback monitor and return whether it is enabled
2011 int tsi148_lm_get(struct vme_lm_resource
*lm
, unsigned long long *lm_base
,
2012 vme_address_t
*aspace
, vme_cycle_t
*cycle
)
2014 u32 lm_base_high
, lm_base_low
, lm_ctl
, enabled
= 0;
2016 mutex_lock(&(lm
->mtx
));
2018 lm_base_high
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMBAU
);
2019 lm_base_low
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMBAL
);
2020 lm_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2022 reg_join(lm_base_high
, lm_base_low
, lm_base
);
2024 if (lm_ctl
& TSI148_LCSR_LMAT_EN
)
2027 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A16
) {
2030 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A24
) {
2033 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A32
) {
2036 if ((lm_ctl
& TSI148_LCSR_LMAT_AS_M
) == TSI148_LCSR_LMAT_AS_A64
) {
2040 if (lm_ctl
& TSI148_LCSR_LMAT_SUPR
)
2041 *cycle
|= VME_SUPER
;
2042 if (lm_ctl
& TSI148_LCSR_LMAT_NPRIV
)
2044 if (lm_ctl
& TSI148_LCSR_LMAT_PGM
)
2046 if (lm_ctl
& TSI148_LCSR_LMAT_DATA
)
2049 mutex_unlock(&(lm
->mtx
));
2055 * Attach a callback to a specific location monitor.
2057 * Callback will be passed the monitor triggered.
2059 int tsi148_lm_attach(struct vme_lm_resource
*lm
, int monitor
,
2060 void (*callback
)(int))
2064 mutex_lock(&(lm
->mtx
));
2066 /* Ensure that the location monitor is configured - need PGM or DATA */
2067 lm_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2068 if ((lm_ctl
& (TSI148_LCSR_LMAT_PGM
| TSI148_LCSR_LMAT_DATA
)) == 0) {
2069 mutex_unlock(&(lm
->mtx
));
2070 printk("Location monitor not properly configured\n");
2074 /* Check that a callback isn't already attached */
2075 if (lm_callback
[monitor
] != NULL
) {
2076 mutex_unlock(&(lm
->mtx
));
2077 printk("Existing callback attached\n");
2081 /* Attach callback */
2082 lm_callback
[monitor
] = callback
;
2084 /* Enable Location Monitor interrupt */
2085 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2086 tmp
|= TSI148_LCSR_INTEN_LMEN
[monitor
];
2087 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2089 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2090 tmp
|= TSI148_LCSR_INTEO_LMEO
[monitor
];
2091 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2093 /* Ensure that global Location Monitor Enable set */
2094 if ((lm_ctl
& TSI148_LCSR_LMAT_EN
) == 0) {
2095 lm_ctl
|= TSI148_LCSR_LMAT_EN
;
2096 iowrite32be(lm_ctl
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2099 mutex_unlock(&(lm
->mtx
));
2105 * Detach a callback function forn a specific location monitor.
2107 int tsi148_lm_detach(struct vme_lm_resource
*lm
, int monitor
)
2111 mutex_lock(&(lm
->mtx
));
2113 /* Disable Location Monitor and ensure previous interrupts are clear */
2114 lm_en
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2115 lm_en
&= ~TSI148_LCSR_INTEN_LMEN
[monitor
];
2116 iowrite32be(lm_en
, tsi148_bridge
->base
+ TSI148_LCSR_INTEN
);
2118 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2119 tmp
&= ~TSI148_LCSR_INTEO_LMEO
[monitor
];
2120 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_INTEO
);
2122 iowrite32be(TSI148_LCSR_INTC_LMC
[monitor
],
2123 tsi148_bridge
->base
+ TSI148_LCSR_INTC
);
2125 /* Detach callback */
2126 lm_callback
[monitor
] = NULL
;
2128 /* If all location monitors disabled, disable global Location Monitor */
2129 if ((lm_en
& (TSI148_LCSR_INTS_LM0S
| TSI148_LCSR_INTS_LM1S
|
2130 TSI148_LCSR_INTS_LM2S
| TSI148_LCSR_INTS_LM3S
)) == 0) {
2131 tmp
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2132 tmp
&= ~TSI148_LCSR_LMAT_EN
;
2133 iowrite32be(tmp
, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2136 mutex_unlock(&(lm
->mtx
));
2142 * Determine Geographical Addressing
2144 int tsi148_slot_get(void)
2148 slot
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2149 slot
= slot
& TSI148_LCSR_VSTAT_GA_M
;
2153 static int __init
tsi148_init(void)
2155 return pci_register_driver(&tsi148_driver
);
2159 * Configure CR/CSR space
2161 * Access to the CR/CSR can be configured at power-up. The location of the
2162 * CR/CSR registers in the CR/CSR address space is determined by the boards
2163 * Auto-ID or Geographic address. This function ensures that the window is
2164 * enabled at an offset consistent with the boards geopgraphic address.
2166 * Each board has a 512kB window, with the highest 4kB being used for the
2167 * boards registers, this means there is a fix length 508kB window which must
2168 * be mapped onto PCI memory.
2170 static int tsi148_crcsr_init(struct pci_dev
*pdev
)
2172 u32 cbar
, crat
, vstat
;
2173 u32 crcsr_bus_high
, crcsr_bus_low
;
2176 /* Allocate mem for CR/CSR image */
2177 crcsr_kernel
= pci_alloc_consistent(pdev
, VME_CRCSR_BUF_SIZE
,
2179 if (crcsr_kernel
== NULL
) {
2180 dev_err(&pdev
->dev
, "Failed to allocate memory for CR/CSR "
2185 memset(crcsr_kernel
, 0, VME_CRCSR_BUF_SIZE
);
2187 reg_split(crcsr_bus
, &crcsr_bus_high
, &crcsr_bus_low
);
2189 iowrite32be(crcsr_bus_high
, tsi148_bridge
->base
+ TSI148_LCSR_CROU
);
2190 iowrite32be(crcsr_bus_low
, tsi148_bridge
->base
+ TSI148_LCSR_CROL
);
2192 /* Ensure that the CR/CSR is configured at the correct offset */
2193 cbar
= ioread32be(tsi148_bridge
->base
+ TSI148_CBAR
);
2194 cbar
= (cbar
& TSI148_CRCSR_CBAR_M
)>>3;
2196 vstat
= tsi148_slot_get();
2198 if (cbar
!= vstat
) {
2199 dev_info(&pdev
->dev
, "Setting CR/CSR offset\n");
2200 iowrite32be(cbar
<<3, tsi148_bridge
->base
+ TSI148_CBAR
);
2202 dev_info(&pdev
->dev
, "CR/CSR Offset: %d\n", cbar
);
2204 crat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2205 if (crat
& TSI148_LCSR_CRAT_EN
) {
2206 dev_info(&pdev
->dev
, "Enabling CR/CSR space\n");
2207 iowrite32be(crat
| TSI148_LCSR_CRAT_EN
,
2208 tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2210 dev_info(&pdev
->dev
, "CR/CSR already enabled\n");
2212 /* If we want flushed, error-checked writes, set up a window
2213 * over the CR/CSR registers. We read from here to safely flush
2214 * through VME writes.
2217 retval
= tsi148_master_set(flush_image
, 1, (vstat
* 0x80000),
2218 0x80000, VME_CRCSR
, VME_SCT
, VME_D16
);
2220 dev_err(&pdev
->dev
, "Configuring flush image failed\n");
2227 static void tsi148_crcsr_exit(struct pci_dev
*pdev
)
2231 /* Turn off CR/CSR space */
2232 crat
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2233 iowrite32be(crat
& ~TSI148_LCSR_CRAT_EN
,
2234 tsi148_bridge
->base
+ TSI148_LCSR_CRAT
);
2237 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CROU
);
2238 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CROL
);
2240 pci_free_consistent(pdev
, VME_CRCSR_BUF_SIZE
, crcsr_kernel
, crcsr_bus
);
2243 static int tsi148_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
2245 int retval
, i
, master_num
;
2247 struct list_head
*pos
= NULL
;
2248 struct vme_master_resource
*master_image
;
2249 struct vme_slave_resource
*slave_image
;
2250 struct vme_dma_resource
*dma_ctrlr
;
2251 struct vme_lm_resource
*lm
;
2253 /* If we want to support more than one of each bridge, we need to
2254 * dynamically generate this so we get one per device
2256 tsi148_bridge
= (struct vme_bridge
*)kmalloc(sizeof(struct vme_bridge
),
2258 if (tsi148_bridge
== NULL
) {
2259 dev_err(&pdev
->dev
, "Failed to allocate memory for device "
2265 memset(tsi148_bridge
, 0, sizeof(struct vme_bridge
));
2267 /* Enable the device */
2268 retval
= pci_enable_device(pdev
);
2270 dev_err(&pdev
->dev
, "Unable to enable device\n");
2275 retval
= pci_request_regions(pdev
, driver_name
);
2277 dev_err(&pdev
->dev
, "Unable to reserve resources\n");
2281 /* map registers in BAR 0 */
2282 tsi148_bridge
->base
= ioremap_nocache(pci_resource_start(pdev
, 0), 4096);
2283 if (!tsi148_bridge
->base
) {
2284 dev_err(&pdev
->dev
, "Unable to remap CRG region\n");
2289 /* Check to see if the mapping worked out */
2290 data
= ioread32(tsi148_bridge
->base
+ TSI148_PCFS_ID
) & 0x0000FFFF;
2291 if (data
!= PCI_VENDOR_ID_TUNDRA
) {
2292 dev_err(&pdev
->dev
, "CRG region check failed\n");
2297 /* Initialize wait queues & mutual exclusion flags */
2298 /* XXX These need to be moved to the vme_bridge structure */
2299 init_waitqueue_head(&dma_queue
[0]);
2300 init_waitqueue_head(&dma_queue
[1]);
2301 init_waitqueue_head(&iack_queue
);
2302 mutex_init(&(vme_int
));
2303 mutex_init(&(vme_rmw
));
2305 tsi148_bridge
->parent
= &(pdev
->dev
);
2306 strcpy(tsi148_bridge
->name
, driver_name
);
2309 retval
= tsi148_irq_init(tsi148_bridge
);
2311 dev_err(&pdev
->dev
, "Chip Initialization failed.\n");
2315 /* If we are going to flush writes, we need to read from the VME bus.
2316 * We need to do this safely, thus we read the devices own CR/CSR
2317 * register. To do this we must set up a window in CR/CSR space and
2318 * hence have one less master window resource available.
2320 master_num
= TSI148_MAX_MASTER
;
2324 flush_image
= (struct vme_master_resource
*)kmalloc(
2325 sizeof(struct vme_master_resource
), GFP_KERNEL
);
2326 if (flush_image
== NULL
) {
2327 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2328 "flush resource structure\n");
2332 flush_image
->parent
= tsi148_bridge
;
2333 spin_lock_init(&(flush_image
->lock
));
2334 flush_image
->locked
= 1;
2335 flush_image
->number
= master_num
;
2336 flush_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2338 flush_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2339 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2340 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2341 VME_PROG
| VME_DATA
;
2342 flush_image
->width_attr
= VME_D16
| VME_D32
;
2343 memset(&(flush_image
->pci_resource
), 0,
2344 sizeof(struct resource
));
2345 flush_image
->kern_base
= NULL
;
2348 /* Add master windows to list */
2349 INIT_LIST_HEAD(&(tsi148_bridge
->master_resources
));
2350 for (i
= 0; i
< master_num
; i
++) {
2351 master_image
= (struct vme_master_resource
*)kmalloc(
2352 sizeof(struct vme_master_resource
), GFP_KERNEL
);
2353 if (master_image
== NULL
) {
2354 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2355 "master resource structure\n");
2359 master_image
->parent
= tsi148_bridge
;
2360 spin_lock_init(&(master_image
->lock
));
2361 master_image
->locked
= 0;
2362 master_image
->number
= i
;
2363 master_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2365 master_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2366 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2367 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2368 VME_PROG
| VME_DATA
;
2369 master_image
->width_attr
= VME_D16
| VME_D32
;
2370 memset(&(master_image
->pci_resource
), 0,
2371 sizeof(struct resource
));
2372 master_image
->kern_base
= NULL
;
2373 list_add_tail(&(master_image
->list
),
2374 &(tsi148_bridge
->master_resources
));
2377 /* Add slave windows to list */
2378 INIT_LIST_HEAD(&(tsi148_bridge
->slave_resources
));
2379 for (i
= 0; i
< TSI148_MAX_SLAVE
; i
++) {
2380 slave_image
= (struct vme_slave_resource
*)kmalloc(
2381 sizeof(struct vme_slave_resource
), GFP_KERNEL
);
2382 if (slave_image
== NULL
) {
2383 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2384 "slave resource structure\n");
2388 slave_image
->parent
= tsi148_bridge
;
2389 mutex_init(&(slave_image
->mtx
));
2390 slave_image
->locked
= 0;
2391 slave_image
->number
= i
;
2392 slave_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
2393 VME_A64
| VME_CRCSR
| VME_USER1
| VME_USER2
|
2394 VME_USER3
| VME_USER4
;
2395 slave_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
2396 VME_2eVME
| VME_2eSST
| VME_2eSSTB
| VME_2eSST160
|
2397 VME_2eSST267
| VME_2eSST320
| VME_SUPER
| VME_USER
|
2398 VME_PROG
| VME_DATA
;
2399 list_add_tail(&(slave_image
->list
),
2400 &(tsi148_bridge
->slave_resources
));
2403 /* Add dma engines to list */
2404 INIT_LIST_HEAD(&(tsi148_bridge
->dma_resources
));
2405 for (i
= 0; i
< TSI148_MAX_DMA
; i
++) {
2406 dma_ctrlr
= (struct vme_dma_resource
*)kmalloc(
2407 sizeof(struct vme_dma_resource
), GFP_KERNEL
);
2408 if (dma_ctrlr
== NULL
) {
2409 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2410 "dma resource structure\n");
2414 dma_ctrlr
->parent
= tsi148_bridge
;
2415 mutex_init(&(dma_ctrlr
->mtx
));
2416 dma_ctrlr
->locked
= 0;
2417 dma_ctrlr
->number
= i
;
2418 INIT_LIST_HEAD(&(dma_ctrlr
->pending
));
2419 INIT_LIST_HEAD(&(dma_ctrlr
->running
));
2420 list_add_tail(&(dma_ctrlr
->list
),
2421 &(tsi148_bridge
->dma_resources
));
2424 /* Add location monitor to list */
2425 INIT_LIST_HEAD(&(tsi148_bridge
->lm_resources
));
2426 lm
= kmalloc(sizeof(struct vme_lm_resource
), GFP_KERNEL
);
2428 dev_err(&pdev
->dev
, "Failed to allocate memory for "
2429 "location monitor resource structure\n");
2433 lm
->parent
= tsi148_bridge
;
2434 mutex_init(&(lm
->mtx
));
2438 list_add_tail(&(lm
->list
), &(tsi148_bridge
->lm_resources
));
2440 tsi148_bridge
->slave_get
= tsi148_slave_get
;
2441 tsi148_bridge
->slave_set
= tsi148_slave_set
;
2442 tsi148_bridge
->master_get
= tsi148_master_get
;
2443 tsi148_bridge
->master_set
= tsi148_master_set
;
2444 tsi148_bridge
->master_read
= tsi148_master_read
;
2445 tsi148_bridge
->master_write
= tsi148_master_write
;
2446 tsi148_bridge
->master_rmw
= tsi148_master_rmw
;
2447 tsi148_bridge
->dma_list_add
= tsi148_dma_list_add
;
2448 tsi148_bridge
->dma_list_exec
= tsi148_dma_list_exec
;
2449 tsi148_bridge
->dma_list_empty
= tsi148_dma_list_empty
;
2450 tsi148_bridge
->irq_set
= tsi148_irq_set
;
2451 tsi148_bridge
->irq_generate
= tsi148_irq_generate
;
2452 tsi148_bridge
->lm_set
= tsi148_lm_set
;
2453 tsi148_bridge
->lm_get
= tsi148_lm_get
;
2454 tsi148_bridge
->lm_attach
= tsi148_lm_attach
;
2455 tsi148_bridge
->lm_detach
= tsi148_lm_detach
;
2456 tsi148_bridge
->slot_get
= tsi148_slot_get
;
2458 data
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2459 dev_info(&pdev
->dev
, "Board is%s the VME system controller\n",
2460 (data
& TSI148_LCSR_VSTAT_SCONS
)? "" : " not");
2461 dev_info(&pdev
->dev
, "VME geographical address is %d\n",
2462 data
& TSI148_LCSR_VSTAT_GA_M
);
2463 dev_info(&pdev
->dev
, "VME Write and flush and error check is %s\n",
2464 err_chk
? "enabled" : "disabled");
2466 if(tsi148_crcsr_init(pdev
)) {
2467 dev_err(&pdev
->dev
, "CR/CSR configuration failed.\n");
2472 /* Need to save tsi148_bridge pointer locally in link list for use in
2475 retval
= vme_register_bridge(tsi148_bridge
);
2477 dev_err(&pdev
->dev
, "Chip Registration failed.\n");
2481 /* Clear VME bus "board fail", and "power-up reset" lines */
2482 data
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2483 data
&= ~TSI148_LCSR_VSTAT_BRDFL
;
2484 data
|= TSI148_LCSR_VSTAT_CPURST
;
2485 iowrite32be(data
, tsi148_bridge
->base
+ TSI148_LCSR_VSTAT
);
2489 vme_unregister_bridge(tsi148_bridge
);
2491 tsi148_crcsr_exit(pdev
);
2494 /* resources are stored in link list */
2495 list_for_each(pos
, &(tsi148_bridge
->lm_resources
)) {
2496 lm
= list_entry(pos
, struct vme_lm_resource
, list
);
2501 /* resources are stored in link list */
2502 list_for_each(pos
, &(tsi148_bridge
->dma_resources
)) {
2503 dma_ctrlr
= list_entry(pos
, struct vme_dma_resource
, list
);
2508 /* resources are stored in link list */
2509 list_for_each(pos
, &(tsi148_bridge
->slave_resources
)) {
2510 slave_image
= list_entry(pos
, struct vme_slave_resource
, list
);
2515 /* resources are stored in link list */
2516 list_for_each(pos
, &(tsi148_bridge
->master_resources
)) {
2517 master_image
= list_entry(pos
, struct vme_master_resource
, list
);
2519 kfree(master_image
);
2522 tsi148_irq_exit(pdev
);
2525 iounmap(tsi148_bridge
->base
);
2527 pci_release_regions(pdev
);
2529 pci_disable_device(pdev
);
2531 kfree(tsi148_bridge
);
2537 static void tsi148_remove(struct pci_dev
*pdev
)
2539 struct list_head
*pos
= NULL
;
2540 struct vme_master_resource
*master_image
;
2541 struct vme_slave_resource
*slave_image
;
2542 struct vme_dma_resource
*dma_ctrlr
;
2545 dev_dbg(&pdev
->dev
, "Driver is being unloaded.\n");
2547 /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */
2550 * Shutdown all inbound and outbound windows.
2552 for (i
= 0; i
< 8; i
++) {
2553 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_IT
[i
] +
2554 TSI148_LCSR_OFFSET_ITAT
);
2555 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_OT
[i
] +
2556 TSI148_LCSR_OFFSET_OTAT
);
2560 * Shutdown Location monitor.
2562 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_LMAT
);
2567 iowrite32be(0, tsi148_bridge
->base
+ TSI148_LCSR_CSRAT
);
2570 * Clear error status.
2572 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_EDPAT
);
2573 iowrite32be(0xFFFFFFFF, tsi148_bridge
->base
+ TSI148_LCSR_VEAT
);
2574 iowrite32be(0x07000700, tsi148_bridge
->base
+ TSI148_LCSR_PSTAT
);
2577 * Remove VIRQ interrupt (if any)
2579 if (ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VICR
) & 0x800) {
2580 iowrite32be(0x8000, tsi148_bridge
->base
+ TSI148_LCSR_VICR
);
2584 * Map all Interrupts to PCI INTA
2586 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTM1
);
2587 iowrite32be(0x0, tsi148_bridge
->base
+ TSI148_LCSR_INTM2
);
2589 tsi148_irq_exit(pdev
);
2591 vme_unregister_bridge(tsi148_bridge
);
2593 tsi148_crcsr_exit(pdev
);
2595 /* resources are stored in link list */
2596 list_for_each(pos
, &(tsi148_bridge
->dma_resources
)) {
2597 dma_ctrlr
= list_entry(pos
, struct vme_dma_resource
, list
);
2602 /* resources are stored in link list */
2603 list_for_each(pos
, &(tsi148_bridge
->slave_resources
)) {
2604 slave_image
= list_entry(pos
, struct vme_slave_resource
, list
);
2609 /* resources are stored in link list */
2610 list_for_each(pos
, &(tsi148_bridge
->master_resources
)) {
2611 master_image
= list_entry(pos
, struct vme_master_resource
, list
);
2613 kfree(master_image
);
2616 tsi148_irq_exit(pdev
);
2618 iounmap(tsi148_bridge
->base
);
2620 pci_release_regions(pdev
);
2622 pci_disable_device(pdev
);
2624 kfree(tsi148_bridge
);
/*
 * Module exit: unregister the PCI driver (the core then invokes
 * tsi148_remove() for every bound device) and log the unload.
 */
static void __exit tsi148_exit(void)
{
	pci_unregister_driver(&tsi148_driver);

	printk(KERN_DEBUG "Driver removed.\n");
}
/* "err_chk": boolean module parameter enabling VME error checking on master
 * reads/writes (reported at probe time); perms 0 means it is not visible or
 * writable via sysfs after load. */
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");

module_init(tsi148_init);
module_exit(tsi148_exit);
/*----------------------------------------------------------------------------
 * [section banner — intervening lines elided in this excerpt]
 *--------------------------------------------------------------------------*/

/*
 * Direct Mode DMA transfer
 *
 * XXX Not looking at direct mode for now, we can always use link list mode
 * with a single entry.
 */
int tsi148_dma_run(struct vme_dma_resource *resource, struct vme_dma_attr src,
	struct vme_dma_attr dest, size_t count)
{
	/* NOTE(review): several local declarations (vmeDma, channel, dctlreg,
	 * tmp, val, x, ...) and statements are missing from this excerpt; gaps
	 * are marked below. The body references vmeDma, which is not a
	 * parameter — this looks like retained legacy code; confirm against
	 * the full source. */
	struct vmeDmaPacket *cur_dma;
	struct tsi148_dma_descriptor *dmaLL;

	/* Encode the maximum VME block size into dctlreg bits 12+. */
	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((32 << x) >= vmeDma->maxVmeBlockSize) {
			/* [loop-exit statement elided in this excerpt] */
		}
	}
	/* [lines elided] */
	dctlreg |= (x << 12);

	/* Encode the maximum PCI block size into dctlreg bits 4+. */
	for (x = 0; x < 8; x++) {	/* pci block size */
		if ((32 << x) >= vmeDma->maxPciBlockSize) {
			/* [loop-exit statement elided in this excerpt] */
		}
	}
	/* [lines elided] */
	dctlreg |= (x << 4);

	/* VME back-off timer encoding (dctlreg bits 8+), if requested. */
	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
				/* [loop-exit statement elided] */
			}
		}
		/* [lines elided] */
		dctlreg |= (x << 8);
	}

	/* PCI back-off timer encoding (dctlreg bits 0+), if requested. */
	if (vmeDma->pciBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* pci timer */
			if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
				/* [loop-exit statement elided] */
			}
		}
		/* [lines elided] */
		dctlreg |= (x << 0);
	}

	/* Program registers for DMA transfer */
	iowrite32be(dmaLL->dsau, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU);
	iowrite32be(dmaLL->dsal, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL);
	iowrite32be(dmaLL->ddau, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU);
	iowrite32be(dmaLL->ddal, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL);
	iowrite32be(dmaLL->dsat, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT);
	iowrite32be(dmaLL->ddat, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT);
	iowrite32be(dmaLL->dcnt, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT);
	iowrite32be(dmaLL->ddbs, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS);

	/* Start the operation */
	iowrite32be(dctlreg | 0x2000000, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* NOTE(review): tmp is sampled once before sleeping and not re-read in
	 * the visible wait condition, so completion presumably depends on the
	 * interrupt/wakeup path — TODO confirm against full source. */
	tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);
	wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0);

	/*
	 * Read status register, we should probably do this in some error
	 * handler rather than here so that we can be sure we haven't kicked off
	 * another DMA transfer.
	 */
	val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	vmeDma->vmeDmaStatus = 0;
	if (val & 0x10000000) {
		/* [printk/log-call opening elided in this excerpt] */
		"DMA Error in DMA_tempe_irqhandler DSTA=%08X\n",
		/* [format arguments elided in this excerpt] */
		vmeDma->vmeDmaStatus = val;
	}
	/* [remainder of function elided in this excerpt] */
}
/* Global VME controller information */
/* NOTE(review): file-scope handle to the bridge's PCI device; where it is
 * assigned is not visible in this excerpt. */
struct pci_dev *vme_pci_dev;
/*
 * Set the VME bus arbiter with the requested attributes
 */
int tempe_set_arbiter(vmeArbiterCfg_t * vmeArb)
{
	/* NOTE(review): local declarations (temp_ctl, gto) and several branch
	 * bodies are missing from this excerpt; gaps are marked below. */

	/* Read VCTRL and clear the fields about to be re-encoded (mask keeps
	 * everything except the low byte and bit 20). */
	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
	temp_ctl &= 0xFFEFFF00;

	/* Translate the requested global timeout into the hardware encoding. */
	if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
		/* [branch body elided in this excerpt] */
	} else if (vmeArb->globalTimeoutTimer > 2048) {
		/* [branch body elided — presumably rejects out-of-range
		 * values; TODO confirm] */
	} else if (vmeArb->globalTimeoutTimer == 0) {
		/* [branch body elided] */
	}
	/* [lines elided — presumably an else branch initialising gto] */
	while ((16 * (1 << (gto - 1))) < vmeArb->globalTimeoutTimer) {
		/* [loop body elided — presumably increments gto] */
	}
	/* [lines elided] */

	if (vmeArb->arbiterMode != VME_PRIORITY_MODE) {
		/* [branch body elided — presumably sets a mode bit] */
	}

	if (vmeArb->arbiterTimeoutFlag) {
		/* [branch body elided — presumably sets a timeout-enable
		 * bit] */
	}

	/* No-early-release option lives in bit 20 (consistent with the clear
	 * mask above). */
	if (vmeArb->noEarlyReleaseFlag) {
		temp_ctl |= 1 << 20;
	}
	iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VCTRL);
	/* [return statement elided in this excerpt] */
}
/*
 * Return the attributes of the VME bus arbiter.
 */
int tempe_get_arbiter(vmeArbiterCfg_t * vmeArb)
{
	/* NOTE(review): local declarations (temp_ctl, gto) are missing from
	 * this excerpt; gaps are marked below. */

	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);

	/* Low nibble of VCTRL holds the global timeout encoding. */
	gto = temp_ctl & 0xF;
	/* [line elided — presumably a "gto != 0" guard; without it the shift
	 * below would be 1 << -1 (undefined behaviour) — TODO confirm against
	 * full source] */
	vmeArb->globalTimeoutTimer = (16 * (1 << (gto - 1)));

	/* Bit 6 distinguishes round-robin from priority arbitration. */
	if (temp_ctl & (1 << 6)) {
		vmeArb->arbiterMode = VME_R_ROBIN_MODE;
	}
	/* [else-branch delimiters elided in this excerpt] */
	vmeArb->arbiterMode = VME_PRIORITY_MODE;

	if (temp_ctl & (1 << 7)) {
		vmeArb->arbiterTimeoutFlag = 1;
	}

	if (temp_ctl & (1 << 20)) {
		vmeArb->noEarlyReleaseFlag = 1;
	}
	/* [return statement elided in this excerpt] */
}
/*
 * Set the VME bus requestor with the requested attributes
 */
int tempe_set_requestor(vmeRequesterCfg_t * vmeReq)
{
	/* NOTE(review): the declaration of temp_ctl is missing from this
	 * excerpt. */

	/* Read-modify-write VMCTRL, clearing the low 16 bits first. */
	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
	temp_ctl &= 0xFFFF0000;

	/* Release mode flag goes into bit 3. */
	if (vmeReq->releaseMode == 1) {
		temp_ctl |= (1 << 3);
	}

	/* Fair-request flag goes into bit 2. */
	if (vmeReq->fairMode == 1) {
		temp_ctl |= (1 << 2);
	}

	/* 3-bit time-on/time-off timer encodings plus the request level. */
	temp_ctl |= (vmeReq->timeonTimeoutTimer & 7) << 8;
	temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12;
	temp_ctl |= vmeReq->requestLevel;

	iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
	/* [return statement elided in this excerpt] */
}
2859 * Return the attributes of the VME bus requestor
2861 int tempe_get_requestor(vmeRequesterCfg_t
* vmeReq
)
2865 temp_ctl
= ioread32be(tsi148_bridge
->base
+ TSI148_LCSR_VMCTRL
);
2867 if (temp_ctl
& 0x18) {
2868 vmeReq
->releaseMode
= 1;
2871 if (temp_ctl
& (1 << 2)) {
2872 vmeReq
->fairMode
= 1;
2875 vmeReq
->requestLevel
= temp_ctl
& 3;
2876 vmeReq
->timeonTimeoutTimer
= (temp_ctl
>> 8) & 7;
2877 vmeReq
->timeoffTimeoutTimer
= (temp_ctl
>> 12) & 7;