drivers/staging/vme/bridges/vme_tsi148.c
1 /*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <asm/time.h>
29 #include <asm/io.h>
30 #include <asm/uaccess.h>
31
32 #include "../vme.h"
33 #include "../vme_bridge.h"
34 #include "vme_tsi148.h"
35
36 static int __init tsi148_init(void);
37 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
38 static void tsi148_remove(struct pci_dev *);
39 static void __exit tsi148_exit(void);
40
41
42 int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
43 unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
44 int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
45 unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
46
47 int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
48 unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
49 int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
50 unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
51 ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
52 loff_t);
53 ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
54 loff_t);
55 unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
56 unsigned int, unsigned int, loff_t);
57 int tsi148_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
58 struct vme_dma_attr *, size_t);
59 int tsi148_dma_list_exec(struct vme_dma_list *);
60 int tsi148_dma_list_empty(struct vme_dma_list *);
61 int tsi148_generate_irq(int, int);
62
63 /* Module parameters */
64 static int err_chk;
65 static int geoid;
66
67 static char driver_name[] = "vme_tsi148";
68
69 static const struct pci_device_id tsi148_ids[] = {
70 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
71 { },
72 };
73
74 static struct pci_driver tsi148_driver = {
75 .name = driver_name,
76 .id_table = tsi148_ids,
77 .probe = tsi148_probe,
78 .remove = tsi148_remove,
79 };
80
81 static void reg_join(unsigned int high, unsigned int low,
82 unsigned long long *variable)
83 {
84 *variable = (unsigned long long)high << 32;
85 *variable |= (unsigned long long)low;
86 }
87
88 static void reg_split(unsigned long long variable, unsigned int *high,
89 unsigned int *low)
90 {
91 *low = (unsigned int)variable & 0xFFFFFFFF;
92 *high = (unsigned int)(variable >> 32);
93 }
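/*
 * The helpers above pair a 64-bit value with the TSI148's split upper/lower
 * 32-bit registers. For example, reg_split(0x123456789abcdef0ULL, &high, &low)
 * yields high = 0x12345678 and low = 0x9abcdef0, and reg_join(high, low, &val)
 * reassembles the original 64-bit value.
 */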
94
95 /*
96 * Wakes up DMA queue.
97 */
98 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
99 int channel_mask)
100 {
101 u32 serviced = 0;
102
103 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
104 wake_up(&(bridge->dma_queue[0]));
105 serviced |= TSI148_LCSR_INTC_DMA0C;
106 }
107 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
108 wake_up(&(bridge->dma_queue[1]));
109 serviced |= TSI148_LCSR_INTC_DMA1C;
110 }
111
112 return serviced;
113 }
114
115 /*
116 * Wake up location monitor queue
117 */
118 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
119 {
120 int i;
121 u32 serviced = 0;
122
123 for (i = 0; i < 4; i++) {
124 if (stat & TSI148_LCSR_INTS_LMS[i]) {
125 /* We only enable interrupts if the callback is set */
126 bridge->lm_callback[i](i);
127 serviced |= TSI148_LCSR_INTC_LMC[i];
128 }
129 }
130
131 return serviced;
132 }
133
134 /*
135 * Wake up mail box queue.
136 *
137 * XXX This functionality is not exposed up through the API.
138 */
139 static u32 tsi148_MB_irqhandler(struct tsi148_driver *bridge, u32 stat)
140 {
141 int i;
142 u32 val;
143 u32 serviced = 0;
144
145 for (i = 0; i < 4; i++) {
146 if (stat & TSI148_LCSR_INTS_MBS[i]) {
147 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
148 printk("VME Mailbox %d received: 0x%x\n", i, val);
149 serviced |= TSI148_LCSR_INTC_MBC[i];
150 }
151 }
152
153 return serviced;
154 }
155
156 /*
157 * Display error & status message when PERR (PCI) exception interrupt occurs.
158 */
159 static u32 tsi148_PERR_irqhandler(struct tsi148_driver *bridge)
160 {
161 printk(KERN_ERR
162 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
163 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
164 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
165 ioread32be(bridge->base + TSI148_LCSR_EDPAT)
166 );
167 printk(KERN_ERR
168 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
169 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
170 ioread32be(bridge->base + TSI148_LCSR_EDPXS)
171 );
172
173 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
174
175 return TSI148_LCSR_INTC_PERRC;
176 }
177
178 /*
179 * Save address and status when VME error interrupt occurs.
180 */
181 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
182 {
183 unsigned int error_addr_high, error_addr_low;
184 unsigned long long error_addr;
185 u32 error_attrib;
186 struct vme_bus_error *error;
187 struct tsi148_driver *bridge;
188
189 bridge = tsi148_bridge->driver_priv;
190
191 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
192 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
193 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
194
195 reg_join(error_addr_high, error_addr_low, &error_addr);
196
197 /* Check for exception register overflow (we have lost error data) */
198 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
199 printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
200 }
201
202 error = kmalloc(sizeof(struct vme_bus_error),
203 GFP_ATOMIC);
204 if (error) {
205 error->address = error_addr;
206 error->attributes = error_attrib;
207 list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
208 } else {
209 printk(KERN_ERR
210 "Unable to alloc memory for VMEbus Error reporting\n");
211 printk(KERN_ERR
212 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
213 error_addr, error_attrib);
214 }
215
216 /* Clear Status */
217 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
218
219 return TSI148_LCSR_INTC_VERRC;
220 }
221
222 /*
223 * Wake up IACK queue.
224 */
225 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
226 {
227 wake_up(&(bridge->iack_queue));
228
229 return TSI148_LCSR_INTC_IACKC;
230 }
231
232 /*
233 * Calling VME bus interrupt callback if provided.
234 */
235 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
236 u32 stat)
237 {
238 int vec, i, serviced = 0;
239 struct tsi148_driver *bridge;
240
241 bridge = tsi148_bridge->driver_priv;
242
243 for (i = 7; i > 0; i--) {
244 if (stat & (1 << i)) {
245 /*
246 * Note: Even though the registers are defined
247 * as 32-bits in the spec, we only want to issue
248 * 8-bit IACK cycles on the bus, read from offset
249 * 3.
250 */
251 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
252
253 vme_irq_handler(tsi148_bridge, i, vec);
254
255 serviced |= (1 << i);
256 }
257 }
258
259 return serviced;
260 }
261
262 /*
263 * Top level interrupt handler. Clears appropriate interrupt status bits and
264 * then calls appropriate sub handler(s).
265 */
266 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
267 {
268 u32 stat, enable, serviced = 0;
269 struct vme_bridge *tsi148_bridge;
270 struct tsi148_driver *bridge;
271
272 tsi148_bridge = ptr;
273
274 bridge = tsi148_bridge->driver_priv;
275
276 /* Determine which interrupts are unmasked and set */
277 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
278 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
279
280 /* Only look at unmasked interrupts */
281 stat &= enable;
282
283 if (unlikely(!stat)) {
284 return IRQ_NONE;
285 }
286
287 /* Call subhandlers as appropriate */
288 /* DMA irqs */
289 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
290 serviced |= tsi148_DMA_irqhandler(bridge, stat);
291
292 /* Location monitor irqs */
293 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
294 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
295 serviced |= tsi148_LM_irqhandler(bridge, stat);
296
297 /* Mail box irqs */
298 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
299 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
300 serviced |= tsi148_MB_irqhandler(bridge, stat);
301
302 /* PCI bus error */
303 if (stat & TSI148_LCSR_INTS_PERRS)
304 serviced |= tsi148_PERR_irqhandler(bridge);
305
306 /* VME bus error */
307 if (stat & TSI148_LCSR_INTS_VERRS)
308 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
309
310 /* IACK irq */
311 if (stat & TSI148_LCSR_INTS_IACKS)
312 serviced |= tsi148_IACK_irqhandler(bridge);
313
314 /* VME bus irqs */
315 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
316 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
317 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
318 TSI148_LCSR_INTS_IRQ1S))
319 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
320
321 /* Clear serviced interrupts */
322 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
323
324 return IRQ_HANDLED;
325 }
326
327 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
328 {
329 int result;
330 unsigned int tmp;
331 struct pci_dev *pdev;
332 struct tsi148_driver *bridge;
333
334 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
335
336 bridge = tsi148_bridge->driver_priv;
337
338 /* Initialise list for VME bus errors */
339 INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
340
341 mutex_init(&(tsi148_bridge->irq_mtx));
342
343 result = request_irq(pdev->irq,
344 tsi148_irqhandler,
345 IRQF_SHARED,
346 driver_name, tsi148_bridge);
347 if (result) {
348 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
349 pdev->irq);
350 return result;
351 }
352
353 /* Enable and unmask interrupts */
354 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
355 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
356 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
357 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
358 TSI148_LCSR_INTEO_IACKEO;
359
360 /* This leaves the following interrupts masked.
361 * TSI148_LCSR_INTEO_VIEEO
362 * TSI148_LCSR_INTEO_SYSFLEO
363 * TSI148_LCSR_INTEO_ACFLEO
364 */
365
366 /* Don't enable Location Monitor interrupts here - they will be
367 * enabled when the location monitors are properly configured and
368 * a callback has been attached.
369 * TSI148_LCSR_INTEO_LM0EO
370 * TSI148_LCSR_INTEO_LM1EO
371 * TSI148_LCSR_INTEO_LM2EO
372 * TSI148_LCSR_INTEO_LM3EO
373 */
374
375 /* Don't enable VME interrupts until we add a handler, else the board
376 * will respond to it and we don't want that unless it knows how to
377 * properly deal with it.
378 * TSI148_LCSR_INTEO_IRQ7EO
379 * TSI148_LCSR_INTEO_IRQ6EO
380 * TSI148_LCSR_INTEO_IRQ5EO
381 * TSI148_LCSR_INTEO_IRQ4EO
382 * TSI148_LCSR_INTEO_IRQ3EO
383 * TSI148_LCSR_INTEO_IRQ2EO
384 * TSI148_LCSR_INTEO_IRQ1EO
385 */
386
387 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
388 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
389
390 return 0;
391 }
392
393 static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
394 {
395 /* Turn off interrupts */
396 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
397 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
398
399 /* Clear all interrupts */
400 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
401
402 /* Detach interrupt handler */
403 free_irq(pdev->irq, pdev); /* FIXME: dev_id should match the tsi148_bridge cookie passed to request_irq() */
404 }
405
406 /*
407 * Check to see if an IACK has been received, return true (1) or false (0).
408 */
409 int tsi148_iack_received(struct tsi148_driver *bridge)
410 {
411 u32 tmp;
412
413 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
414
415 if (tmp & TSI148_LCSR_VICR_IRQS)
416 return 0;
417 else
418 return 1;
419 }
420
421 /*
422 * Configure VME interrupt
423 */
424 void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
425 int state, int sync)
426 {
427 struct pci_dev *pdev;
428 u32 tmp;
429 struct tsi148_driver *bridge;
430
431 bridge = tsi148_bridge->driver_priv;
432
433 /* We need to do the ordering differently for enabling and disabling */
434 if (state == 0) {
435 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
436 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
437 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
438
439 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
440 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
441 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
442
443 if (sync != 0) {
444 pdev = container_of(tsi148_bridge->parent,
445 struct pci_dev, dev);
446
447 synchronize_irq(pdev->irq);
448 }
449 } else {
450 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
451 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
452 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
453
454 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
455 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
456 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
457 }
458 }
459
460 /*
461 * Generate a VME bus interrupt at the requested level & vector. Wait for
462 * interrupt to be acked.
463 */
464 int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
465 {
466 u32 tmp;
467 struct tsi148_driver *bridge;
468
469 bridge = tsi148_bridge->driver_priv;
470
471 mutex_lock(&(bridge->vme_int));
472
473 /* Read VICR register */
474 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
475
476 /* Set Status/ID */
477 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
478 (statid & TSI148_LCSR_VICR_STID_M);
479 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
480
481 /* Assert VMEbus IRQ */
482 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
483 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
484
485 /* XXX Consider implementing a timeout? */
486 wait_event_interruptible(bridge->iack_queue,
487 tsi148_iack_received(bridge));
488
489 mutex_unlock(&(bridge->vme_int));
490
491 return 0;
492 }
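/*
 * Illustrative use of tsi148_irq_generate() (values are arbitrary): a call
 * such as tsi148_irq_generate(tsi148_bridge, 3, 0xaa) asserts a level 3
 * VMEbus interrupt with status/ID 0xaa and sleeps on iack_queue until
 * tsi148_iack_received() reports that the IACK cycle has completed.
 */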
493
494 /*
495 * Find the first error in this address range
496 */
497 static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
498 vme_address_t aspace, unsigned long long address, size_t count)
499 {
500 struct list_head *err_pos;
501 struct vme_bus_error *vme_err, *valid = NULL;
502 unsigned long long bound;
503
504 bound = address + count;
505
506 /*
507 * XXX We are currently not looking at the address space when parsing
508 * for errors. This is because parsing the Address Modifier Codes
509 * is going to be quite resource intensive to do properly. We
510 * should be OK just looking at the addresses and this is certainly
511 * much better than what we had before.
512 */
513 err_pos = NULL;
514 /* Iterate through errors */
515 list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
516 vme_err = list_entry(err_pos, struct vme_bus_error, list);
517 if ((vme_err->address >= address) && (vme_err->address < bound)) {
518 valid = vme_err;
519 break;
520 }
521 }
522
523 return valid;
524 }
525
526 /*
527 * Clear errors in the provided address range.
528 */
529 static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
530 vme_address_t aspace, unsigned long long address, size_t count)
531 {
532 struct list_head *err_pos, *temp;
533 struct vme_bus_error *vme_err;
534 unsigned long long bound;
535
536 bound = address + count;
537
538 /*
539 * XXX We are currently not looking at the address space when parsing
540 * for errors. This is because parsing the Address Modifier Codes
541 * is going to be quite resource intensive to do properly. We
542 * should be OK just looking at the addresses and this is certainly
543 * much better than what we had before.
544 */
545 err_pos = NULL;
546 /* Iterate through errors */
547 list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
548 vme_err = list_entry(err_pos, struct vme_bus_error, list);
549
550 if ((vme_err->address >= address) && (vme_err->address < bound)) {
551 list_del(err_pos);
552 kfree(vme_err);
553 }
554 }
555 }
556
557 /*
558 * Initialize a slave window with the requested attributes.
559 */
560 int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
561 unsigned long long vme_base, unsigned long long size,
562 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
563 {
564 unsigned int i, addr = 0, granularity = 0;
565 unsigned int temp_ctl = 0;
566 unsigned int vme_base_low, vme_base_high;
567 unsigned int vme_bound_low, vme_bound_high;
568 unsigned int pci_offset_low, pci_offset_high;
569 unsigned long long vme_bound, pci_offset;
570 struct tsi148_driver *bridge;
571
572 bridge = image->parent->driver_priv;
573
574 i = image->number;
575
576 switch (aspace) {
577 case VME_A16:
578 granularity = 0x10;
579 addr |= TSI148_LCSR_ITAT_AS_A16;
580 break;
581 case VME_A24:
582 granularity = 0x1000;
583 addr |= TSI148_LCSR_ITAT_AS_A24;
584 break;
585 case VME_A32:
586 granularity = 0x10000;
587 addr |= TSI148_LCSR_ITAT_AS_A32;
588 break;
589 case VME_A64:
590 granularity = 0x10000;
591 addr |= TSI148_LCSR_ITAT_AS_A64;
592 break;
593 case VME_CRCSR:
594 case VME_USER1:
595 case VME_USER2:
596 case VME_USER3:
597 case VME_USER4:
598 default:
599 printk("Invalid address space\n");
600 return -EINVAL;
601 break;
602 }
603
604 /* Convert 64-bit variables to 2x 32-bit variables */
605 reg_split(vme_base, &vme_base_high, &vme_base_low);
606
607 /*
608 * Bound address is a valid address for the window, adjust
609 * accordingly
610 */
611 vme_bound = vme_base + size - granularity;
612 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
613 pci_offset = (unsigned long long)pci_base - vme_base;
614 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
615
616 if (vme_base_low & (granularity - 1)) {
617 printk("Invalid VME base alignment\n");
618 return -EINVAL;
619 }
620 if (vme_bound_low & (granularity - 1)) {
621 printk("Invalid VME bound alignment\n");
622 return -EINVAL;
623 }
624 if (pci_offset_low & (granularity - 1)) {
625 printk("Invalid PCI Offset alignment\n");
626 return -EINVAL;
627 }
628
629 /* Disable while we are mucking around */
630 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
631 TSI148_LCSR_OFFSET_ITAT);
632 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
633 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
634 TSI148_LCSR_OFFSET_ITAT);
635
636 /* Setup mapping */
637 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITSAU);
639 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITSAL);
641 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
642 TSI148_LCSR_OFFSET_ITEAU);
643 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
644 TSI148_LCSR_OFFSET_ITEAL);
645 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
646 TSI148_LCSR_OFFSET_ITOFU);
647 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
648 TSI148_LCSR_OFFSET_ITOFL);
649
650 /* Setup 2eSST speeds */
651 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
652 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
653 case VME_2eSST160:
654 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
655 break;
656 case VME_2eSST267:
657 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
658 break;
659 case VME_2eSST320:
660 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
661 break;
662 }
663
664 /* Setup cycle types */
665 temp_ctl &= ~(0x1F << 7);
666 if (cycle & VME_BLT)
667 temp_ctl |= TSI148_LCSR_ITAT_BLT;
668 if (cycle & VME_MBLT)
669 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
670 if (cycle & VME_2eVME)
671 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
672 if (cycle & VME_2eSST)
673 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
674 if (cycle & VME_2eSSTB)
675 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
676
677 /* Setup address space */
678 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
679 temp_ctl |= addr;
680
681 temp_ctl &= ~0xF;
682 if (cycle & VME_SUPER)
683 temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
684 if (cycle & VME_USER)
685 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
686 if (cycle & VME_PROG)
687 temp_ctl |= TSI148_LCSR_ITAT_PGM;
688 if (cycle & VME_DATA)
689 temp_ctl |= TSI148_LCSR_ITAT_DATA;
690
691 /* Write ctl reg without enable */
692 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
693 TSI148_LCSR_OFFSET_ITAT);
694
695 if (enabled)
696 temp_ctl |= TSI148_LCSR_ITAT_EN;
697
698 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
699 TSI148_LCSR_OFFSET_ITAT);
700
701 return 0;
702 }
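/*
 * Illustrative call to tsi148_slave_set() (the addresses and the pci_buf
 * handle are assumptions, not values from this driver): mapping a 64KB A24
 * slave window at VME address 0xC00000 onto a PCI buffer could look like
 *
 *	tsi148_slave_set(image, 1, 0xC00000, 0x10000, pci_buf,
 *		VME_A24, VME_USER | VME_DATA);
 *
 * Note that the VME base, size and the resulting PCI offset must all be
 * aligned to the granularity of the selected address space (0x1000 for A24).
 */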
703
704 /*
705 * Get slave window configuration.
706 */
707 int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
708 unsigned long long *vme_base, unsigned long long *size,
709 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
710 {
711 unsigned int i, granularity = 0, ctl = 0;
712 unsigned int vme_base_low, vme_base_high;
713 unsigned int vme_bound_low, vme_bound_high;
714 unsigned int pci_offset_low, pci_offset_high;
715 unsigned long long vme_bound, pci_offset;
716 struct tsi148_driver *bridge;
717
718 bridge = image->parent->driver_priv;
719
720 i = image->number;
721
722 /* Read registers */
723 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
724 TSI148_LCSR_OFFSET_ITAT);
725
726 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
727 TSI148_LCSR_OFFSET_ITSAU);
728 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
729 TSI148_LCSR_OFFSET_ITSAL);
730 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
731 TSI148_LCSR_OFFSET_ITEAU);
732 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
733 TSI148_LCSR_OFFSET_ITEAL);
734 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
735 TSI148_LCSR_OFFSET_ITOFU);
736 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
737 TSI148_LCSR_OFFSET_ITOFL);
738
739 /* Convert 64-bit variables to 2x 32-bit variables */
740 reg_join(vme_base_high, vme_base_low, vme_base);
741 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
742 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
743
744 *pci_base = (dma_addr_t)*vme_base + pci_offset;
745
746 *enabled = 0;
747 *aspace = 0;
748 *cycle = 0;
749
750 if (ctl & TSI148_LCSR_ITAT_EN)
751 *enabled = 1;
752
753 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
754 granularity = 0x10;
755 *aspace |= VME_A16;
756 }
757 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
758 granularity = 0x1000;
759 *aspace |= VME_A24;
760 }
761 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
762 granularity = 0x10000;
763 *aspace |= VME_A32;
764 }
765 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
766 granularity = 0x10000;
767 *aspace |= VME_A64;
768 }
769
770 /* Need granularity before we set the size */
771 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
772
773
774 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
775 *cycle |= VME_2eSST160;
776 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
777 *cycle |= VME_2eSST267;
778 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
779 *cycle |= VME_2eSST320;
780
781 if (ctl & TSI148_LCSR_ITAT_BLT)
782 *cycle |= VME_BLT;
783 if (ctl & TSI148_LCSR_ITAT_MBLT)
784 *cycle |= VME_MBLT;
785 if (ctl & TSI148_LCSR_ITAT_2eVME)
786 *cycle |= VME_2eVME;
787 if (ctl & TSI148_LCSR_ITAT_2eSST)
788 *cycle |= VME_2eSST;
789 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
790 *cycle |= VME_2eSSTB;
791
792 if (ctl & TSI148_LCSR_ITAT_SUPR)
793 *cycle |= VME_SUPER;
794 if (ctl & TSI148_LCSR_ITAT_NPRIV)
795 *cycle |= VME_USER;
796 if (ctl & TSI148_LCSR_ITAT_PGM)
797 *cycle |= VME_PROG;
798 if (ctl & TSI148_LCSR_ITAT_DATA)
799 *cycle |= VME_DATA;
800
801 return 0;
802 }
803
804 /*
805 * Allocate and map PCI Resource
806 */
807 static int tsi148_alloc_resource(struct vme_master_resource *image,
808 unsigned long long size)
809 {
810 unsigned long long existing_size;
811 int retval = 0;
812 struct pci_dev *pdev;
813 struct vme_bridge *tsi148_bridge;
814
815 tsi148_bridge = image->parent;
816
817 /* Find pci_dev container of dev */
818 if (tsi148_bridge->parent == NULL) {
819 printk("Dev entry NULL\n");
820 return -EINVAL;
821 }
822 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
823
824 existing_size = (unsigned long long)(image->bus_resource.end -
825 image->bus_resource.start);
826
827 /* If the existing size is OK, return */
828 if ((size != 0) && (existing_size == (size - 1)))
829 return 0;
830
831 if (existing_size != 0) {
832 iounmap(image->kern_base);
833 image->kern_base = NULL;
834 if (image->bus_resource.name != NULL)
835 kfree(image->bus_resource.name);
836 release_resource(&(image->bus_resource));
837 memset(&(image->bus_resource), 0, sizeof(struct resource));
838 }
839
840 /* Exit here if size is zero */
841 if (size == 0) {
842 return 0;
843 }
844
845 if (image->bus_resource.name == NULL) {
846 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
847 if (image->bus_resource.name == NULL) {
848 printk(KERN_ERR "Unable to allocate memory for resource"
849 " name\n");
850 retval = -ENOMEM;
851 goto err_name;
852 }
853 }
854
855 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
856 image->number);
857
858 image->bus_resource.start = 0;
859 image->bus_resource.end = (unsigned long)size;
860 image->bus_resource.flags = IORESOURCE_MEM;
861
862 retval = pci_bus_alloc_resource(pdev->bus,
863 &(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
864 0, NULL, NULL);
865 if (retval) {
866 printk(KERN_ERR "Failed to allocate mem resource for "
867 "window %d size 0x%lx start 0x%lx\n",
868 image->number, (unsigned long)size,
869 (unsigned long)image->bus_resource.start);
870 goto err_resource;
871 }
872
873 image->kern_base = ioremap_nocache(
874 image->bus_resource.start, size);
875 if (image->kern_base == NULL) {
876 printk(KERN_ERR "Failed to remap resource\n");
877 retval = -ENOMEM;
878 goto err_remap;
879 }
880
881 return 0;
882
883 iounmap(image->kern_base);
884 image->kern_base = NULL;
885 err_remap:
886 release_resource(&(image->bus_resource));
887 err_resource:
888 kfree(image->bus_resource.name);
889 memset(&(image->bus_resource), 0, sizeof(struct resource));
890 err_name:
891 return retval;
892 }
893
894 /*
895 * Free and unmap PCI Resource
896 */
897 static void tsi148_free_resource(struct vme_master_resource *image)
898 {
899 iounmap(image->kern_base);
900 image->kern_base = NULL;
901 release_resource(&(image->bus_resource));
902 kfree(image->bus_resource.name);
903 memset(&(image->bus_resource), 0, sizeof(struct resource));
904 }
905
906 /*
907 * Set the attributes of an outbound window.
908 */
909 int tsi148_master_set(struct vme_master_resource *image, int enabled,
910 unsigned long long vme_base, unsigned long long size,
911 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
912 {
913 int retval = 0;
914 unsigned int i;
915 unsigned int temp_ctl = 0;
916 unsigned int pci_base_low, pci_base_high;
917 unsigned int pci_bound_low, pci_bound_high;
918 unsigned int vme_offset_low, vme_offset_high;
919 unsigned long long pci_bound, vme_offset, pci_base;
920 struct tsi148_driver *bridge;
921
922 bridge = image->parent->driver_priv;
923
924 /* Verify input data */
925 if (vme_base & 0xFFFF) {
926 printk(KERN_ERR "Invalid VME Window alignment\n");
927 retval = -EINVAL;
928 goto err_window;
929 }
930
931 if ((size == 0) && (enabled != 0)) {
932 printk(KERN_ERR "Size must be non-zero for enabled windows\n");
933 retval = -EINVAL;
934 goto err_window;
935 }
936
937 spin_lock(&(image->lock));
938
939 /* Let's allocate the resource here rather than further up the stack as
940 * it avoids pushing loads of bus dependant stuff up the stack. If size
941 * is zero, any existing resource will be freed.
942 */
943 retval = tsi148_alloc_resource(image, size);
944 if (retval) {
945 spin_unlock(&(image->lock));
946 printk(KERN_ERR "Unable to allocate memory for "
947 "resource\n");
948 goto err_res;
949 }
950
951 if (size == 0) {
952 pci_base = 0;
953 pci_bound = 0;
954 vme_offset = 0;
955 } else {
956 pci_base = (unsigned long long)image->bus_resource.start;
957
958 /*
959 * Bound address is a valid address for the window, adjust
960 * according to window granularity.
961 */
962 pci_bound = pci_base + (size - 0x10000);
963 vme_offset = vme_base - pci_base;
964 }
965
966 /* Convert 64-bit variables to 2x 32-bit variables */
967 reg_split(pci_base, &pci_base_high, &pci_base_low);
968 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
969 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
970
971 if (pci_base_low & 0xFFFF) {
972 spin_unlock(&(image->lock));
973 printk(KERN_ERR "Invalid PCI base alignment\n");
974 retval = -EINVAL;
975 goto err_gran;
976 }
977 if (pci_bound_low & 0xFFFF) {
978 spin_unlock(&(image->lock));
979 printk(KERN_ERR "Invalid PCI bound alignment\n");
980 retval = -EINVAL;
981 goto err_gran;
982 }
983 if (vme_offset_low & 0xFFFF) {
984 spin_unlock(&(image->lock));
985 printk(KERN_ERR "Invalid VME Offset alignment\n");
986 retval = -EINVAL;
987 goto err_gran;
988 }
989
990 i = image->number;
991
992 /* Disable while we are mucking around */
993 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
994 TSI148_LCSR_OFFSET_OTAT);
995 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
996 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
997 TSI148_LCSR_OFFSET_OTAT);
998
999 /* Setup 2eSST speeds */
1000 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
1001 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1002 case VME_2eSST160:
1003 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1004 break;
1005 case VME_2eSST267:
1006 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1007 break;
1008 case VME_2eSST320:
1009 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1010 break;
1011 }
1012
1013 /* Setup cycle types */
1014 if (cycle & VME_BLT) {
1015 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1016 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1017 }
1018 if (cycle & VME_MBLT) {
1019 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1020 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1021 }
1022 if (cycle & VME_2eVME) {
1023 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1024 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1025 }
1026 if (cycle & VME_2eSST) {
1027 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1028 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1029 }
1030 if (cycle & VME_2eSSTB) {
1031 printk(KERN_WARNING "Currently not setting Broadcast Select "
1032 "Registers\n");
1033 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1034 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1035 }
1036
1037 /* Setup data width */
1038 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1039 switch (dwidth) {
1040 case VME_D16:
1041 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1042 break;
1043 case VME_D32:
1044 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1045 break;
1046 default:
1047 spin_unlock(&(image->lock));
1048 printk(KERN_ERR "Invalid data width\n");
1049 retval = -EINVAL;
1050 goto err_dwidth;
1051 }
1052
1053 /* Setup address space */
1054 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1055 switch (aspace) {
1056 case VME_A16:
1057 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1058 break;
1059 case VME_A24:
1060 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1061 break;
1062 case VME_A32:
1063 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1064 break;
1065 case VME_A64:
1066 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1067 break;
1068 case VME_CRCSR:
1069 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1070 break;
1071 case VME_USER1:
1072 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1073 break;
1074 case VME_USER2:
1075 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1076 break;
1077 case VME_USER3:
1078 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1079 break;
1080 case VME_USER4:
1081 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1082 break;
1083 default:
1084 spin_unlock(&(image->lock));
1085 printk(KERN_ERR "Invalid address space\n");
1086 retval = -EINVAL;
1087 goto err_aspace;
1088 break;
1089 }
1090
1091 temp_ctl &= ~(3<<4);
1092 if (cycle & VME_SUPER)
1093 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1094 if (cycle & VME_PROG)
1095 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1096
1097 /* Setup mapping */
1098 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1099 TSI148_LCSR_OFFSET_OTSAU);
1100 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1101 TSI148_LCSR_OFFSET_OTSAL);
1102 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1103 TSI148_LCSR_OFFSET_OTEAU);
1104 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1105 TSI148_LCSR_OFFSET_OTEAL);
1106 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1107 TSI148_LCSR_OFFSET_OTOFU);
1108 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1109 TSI148_LCSR_OFFSET_OTOFL);
1110
1111 /* Write ctl reg without enable */
1112 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1113 TSI148_LCSR_OFFSET_OTAT);
1114
1115 if (enabled)
1116 temp_ctl |= TSI148_LCSR_OTAT_EN;
1117
1118 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1119 TSI148_LCSR_OFFSET_OTAT);
1120
1121 spin_unlock(&(image->lock));
1122 return 0;
1123
1124 err_aspace:
1125 err_dwidth:
1126 err_gran:
1127 tsi148_free_resource(image);
1128 err_res:
1129 err_window:
1130 return retval;
1131
1132 }
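/*
 * Illustrative call to tsi148_master_set() (values are arbitrary): a 64KB
 * D32 master window onto A32 space at VME address 0x20000000 could be
 * requested with
 *
 *	tsi148_master_set(image, 1, 0x20000000, 0x10000,
 *		VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);
 *
 * As enforced above, the VME base (and the PCI resource allocated for the
 * window) must be 64KB aligned and the size must be non-zero for an enabled
 * window.
 */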
1133
1134 /*
1135 * Get the attributes of an outbound window.
1136 *
1137 * XXX Not parsing prefetch information.
1138 */
1139 int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1140 unsigned long long *vme_base, unsigned long long *size,
1141 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1142 {
1143 unsigned int i, ctl;
1144 unsigned int pci_base_low, pci_base_high;
1145 unsigned int pci_bound_low, pci_bound_high;
1146 unsigned int vme_offset_low, vme_offset_high;
1147
1148 unsigned long long pci_base, pci_bound, vme_offset;
1149 struct tsi148_driver *bridge;
1150
1151 bridge = image->parent->driver_priv;
1152
1153 i = image->number;
1154
1155 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1156 TSI148_LCSR_OFFSET_OTAT);
1157
1158 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1159 TSI148_LCSR_OFFSET_OTSAU);
1160 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1161 TSI148_LCSR_OFFSET_OTSAL);
1162 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1163 TSI148_LCSR_OFFSET_OTEAU);
1164 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1165 TSI148_LCSR_OFFSET_OTEAL);
1166 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1167 TSI148_LCSR_OFFSET_OTOFU);
1168 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1169 TSI148_LCSR_OFFSET_OTOFL);
1170
1171 /* Convert 64-bit variables to 2x 32-bit variables */
1172 reg_join(pci_base_high, pci_base_low, &pci_base);
1173 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1174 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1175
1176 *vme_base = pci_base + vme_offset;
1177 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1178
1179 *enabled = 0;
1180 *aspace = 0;
1181 *cycle = 0;
1182 *dwidth = 0;
1183
1184 if (ctl & TSI148_LCSR_OTAT_EN)
1185 *enabled = 1;
1186
1187 /* Setup address space */
1188 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1189 *aspace |= VME_A16;
1190 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1191 *aspace |= VME_A24;
1192 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1193 *aspace |= VME_A32;
1194 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1195 *aspace |= VME_A64;
1196 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1197 *aspace |= VME_CRCSR;
1198 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1199 *aspace |= VME_USER1;
1200 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1201 *aspace |= VME_USER2;
1202 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1203 *aspace |= VME_USER3;
1204 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1205 *aspace |= VME_USER4;
1206
1207 /* Setup 2eSST speeds */
1208 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1209 *cycle |= VME_2eSST160;
1210 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1211 *cycle |= VME_2eSST267;
1212 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1213 *cycle |= VME_2eSST320;
1214
1215 /* Setup cycle types */
1216 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_SCT)
1217 *cycle |= VME_SCT;
1218 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_BLT)
1219 *cycle |= VME_BLT;
1220 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_MBLT)
1221 *cycle |= VME_MBLT;
1222 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eVME)
1223 *cycle |= VME_2eVME;
1224 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSST)
1225 *cycle |= VME_2eSST;
1226 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSSTB)
1227 *cycle |= VME_2eSSTB;
1228
1229 if (ctl & TSI148_LCSR_OTAT_SUP)
1230 *cycle |= VME_SUPER;
1231 else
1232 *cycle |= VME_USER;
1233
1234 if (ctl & TSI148_LCSR_OTAT_PGM)
1235 *cycle |= VME_PROG;
1236 else
1237 *cycle |= VME_DATA;
1238
1239 /* Setup data width */
1240 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1241 *dwidth = VME_D16;
1242 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1243 *dwidth = VME_D32;
1244
1245 return 0;
1246 }
1247
1248
1249 int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1250 unsigned long long *vme_base, unsigned long long *size,
1251 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1252 {
1253 int retval;
1254
1255 spin_lock(&(image->lock));
1256
1257 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1258 cycle, dwidth);
1259
1260 spin_unlock(&(image->lock));
1261
1262 return retval;
1263 }
1264
1265 ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1266 size_t count, loff_t offset)
1267 {
1268 int retval, enabled;
1269 unsigned long long vme_base, size;
1270 vme_address_t aspace;
1271 vme_cycle_t cycle;
1272 vme_width_t dwidth;
1273 struct vme_bus_error *vme_err = NULL;
1274 struct vme_bridge *tsi148_bridge;
1275
1276 tsi148_bridge = image->parent;
1277
1278 spin_lock(&(image->lock));
1279
1280 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1281 retval = count;
1282
1283 if (!err_chk)
1284 goto skip_chk;
1285
1286 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1287 &dwidth);
1288
1289 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1290 count);
1291 if (vme_err != NULL) {
1292 dev_err(image->parent->parent, "First VME read error detected "
1293 "at address 0x%llx\n", vme_err->address);
1294 retval = vme_err->address - (vme_base + offset);
1295 /* Clear down saved errors in this address range */
1296 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1297 count);
1298 }
1299
1300 skip_chk:
1301 spin_unlock(&(image->lock));
1302
1303 return retval;
1304 }
1305
1306
1307 ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1308 size_t count, loff_t offset)
1309 {
1310 int retval = 0, enabled;
1311 unsigned long long vme_base, size;
1312 vme_address_t aspace;
1313 vme_cycle_t cycle;
1314 vme_width_t dwidth;
1315
1316 struct vme_bus_error *vme_err = NULL;
1317 struct vme_bridge *tsi148_bridge;
1318 struct tsi148_driver *bridge;
1319
1320 tsi148_bridge = image->parent;
1321
1322 bridge = tsi148_bridge->driver_priv;
1323
1324 spin_lock(&(image->lock));
1325
1326 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1327 retval = count;
1328
1329 /*
1330 * Writes are posted. We need to do a read on the VME bus to flush out
1331 * all of the writes before we check for errors. We can't guarantee
1332 * that reading the data we have just written is safe. It is believed
1333 * that there isn't any read/write re-ordering, so we can read any
1334 * location in VME space, so let's read the Device ID from the tsi148's
1335 * own registers as mapped into CR/CSR space.
1336 *
1337 * We check for saved errors in the written address range/space.
1338 */
1339
1340 if (!err_chk)
1341 goto skip_chk;
1342
1343 /*
1344 * Get window info first, to maximise the time that the buffers may
1345 * flush on their own.
1346 */
1347 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1348 &dwidth);
1349
1350 ioread16(bridge->flush_image->kern_base + 0x7F000);
1351
1352 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1353 count);
1354 if (vme_err != NULL) {
1355 printk(KERN_ERR "First VME write error detected at address 0x%llx\n",
1356 vme_err->address);
1357 retval = vme_err->address - (vme_base + offset);
1358 /* Clear down saved errors in this address range */
1359 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1360 count);
1361 }
1362
1363 skip_chk:
1364 spin_unlock(&(image->lock));
1365
1366 return retval;
1367 }
1368
1369 /*
1370 * Perform an RMW cycle on the VME bus.
1371 *
1372 * Requires a previously configured master window, returns final value.
1373 */
1374 unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1375 unsigned int mask, unsigned int compare, unsigned int swap,
1376 loff_t offset)
1377 {
1378 unsigned long long pci_addr;
1379 unsigned int pci_addr_high, pci_addr_low;
1380 u32 tmp, result;
1381 int i;
1382 struct tsi148_driver *bridge;
1383
1384 bridge = image->parent->driver_priv;
1385
1386 /* Find the PCI address that maps to the desired VME address */
1387 i = image->number;
1388
1389 /* Locking as we can only do one of these at a time */
1390 mutex_lock(&(bridge->vme_rmw));
1391
1392 /* Lock image */
1393 spin_lock(&(image->lock));
1394
1395 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1396 TSI148_LCSR_OFFSET_OTSAU);
1397 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1398 TSI148_LCSR_OFFSET_OTSAL);
1399
1400 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1401 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1402
1403 /* Configure registers */
1404 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1405 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1406 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1407 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1408 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1409
1410 /* Enable RMW */
1411 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1412 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1413 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1414
1415 /* Kick process off with a read to the required address. */
1416 result = ioread32be(image->kern_base + offset);
1417
1418 /* Disable RMW */
1419 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1420 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1421 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1422
1423 spin_unlock(&(image->lock));
1424
1425 mutex_unlock(&(bridge->vme_rmw));
1426
1427 return result;
1428 }
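/*
 * Illustrative use of tsi148_master_rmw() (offset and values are arbitrary):
 *
 *	tsi148_master_rmw(image, 0xFFFFFFFF, 0x0, 0x1, 0x100);
 *
 * compares all 32 bits (mask 0xFFFFFFFF) of the word at offset 0x100 of the
 * master window with 0x0 and, where the masked bits match, writes the swap
 * value 0x1 - a single locked cycle that can be used as a simple VMEbus
 * test-and-set semaphore.
 */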
1429
1430 static int tsi148_dma_set_vme_src_attributes(u32 *attr, vme_address_t aspace,
1431 vme_cycle_t cycle, vme_width_t dwidth)
1432 {
1433 /* Setup 2eSST speeds */
1434 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1435 case VME_2eSST160:
1436 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1437 break;
1438 case VME_2eSST267:
1439 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1440 break;
1441 case VME_2eSST320:
1442 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1443 break;
1444 }
1445
1446 /* Setup cycle types */
1447 if (cycle & VME_SCT) {
1448 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1449 }
1450 if (cycle & VME_BLT) {
1451 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1452 }
1453 if (cycle & VME_MBLT) {
1454 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1455 }
1456 if (cycle & VME_2eVME) {
1457 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1458 }
1459 if (cycle & VME_2eSST) {
1460 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1461 }
1462 if (cycle & VME_2eSSTB) {
1463 printk("Currently not setting Broadcast Select Registers\n");
1464 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1465 }
1466
1467 /* Setup data width */
1468 switch (dwidth) {
1469 case VME_D16:
1470 *attr |= TSI148_LCSR_DSAT_DBW_16;
1471 break;
1472 case VME_D32:
1473 *attr |= TSI148_LCSR_DSAT_DBW_32;
1474 break;
1475 default:
1476 printk("Invalid data width\n");
1477 return -EINVAL;
1478 }
1479
1480 /* Setup address space */
1481 switch (aspace) {
1482 case VME_A16:
1483 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1484 break;
1485 case VME_A24:
1486 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1487 break;
1488 case VME_A32:
1489 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1490 break;
1491 case VME_A64:
1492 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1493 break;
1494 case VME_CRCSR:
1495 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1496 break;
1497 case VME_USER1:
1498 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1499 break;
1500 case VME_USER2:
1501 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1502 break;
1503 case VME_USER3:
1504 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1505 break;
1506 case VME_USER4:
1507 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1508 break;
1509 default:
1510 printk("Invalid address space\n");
1511 return -EINVAL;
1512 break;
1513 }
1514
1515 if (cycle & VME_SUPER)
1516 *attr |= TSI148_LCSR_DSAT_SUP;
1517 if (cycle & VME_PROG)
1518 *attr |= TSI148_LCSR_DSAT_PGM;
1519
1520 return 0;
1521 }
1522
1523 static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
1524 vme_cycle_t cycle, vme_width_t dwidth)
1525 {
1526 /* Setup 2eSST speeds */
1527 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1528 case VME_2eSST160:
1529 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1530 break;
1531 case VME_2eSST267:
1532 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1533 break;
1534 case VME_2eSST320:
1535 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1536 break;
1537 }
1538
1539 /* Setup cycle types */
1540 if (cycle & VME_SCT) {
1541 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1542 }
1543 if (cycle & VME_BLT) {
1544 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1545 }
1546 if (cycle & VME_MBLT) {
1547 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1548 }
1549 if (cycle & VME_2eVME) {
1550 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1551 }
1552 if (cycle & VME_2eSST) {
1553 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1554 }
1555 if (cycle & VME_2eSSTB) {
1556 printk("Currently not setting Broadcast Select Registers\n");
1557 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1558 }
1559
1560 /* Setup data width */
1561 switch (dwidth) {
1562 case VME_D16:
1563 *attr |= TSI148_LCSR_DDAT_DBW_16;
1564 break;
1565 case VME_D32:
1566 *attr |= TSI148_LCSR_DDAT_DBW_32;
1567 break;
1568 default:
1569 printk("Invalid data width\n");
1570 return -EINVAL;
1571 }
1572
1573 /* Setup address space */
1574 switch (aspace) {
1575 case VME_A16:
1576 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1577 break;
1578 case VME_A24:
1579 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1580 break;
1581 case VME_A32:
1582 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1583 break;
1584 case VME_A64:
1585 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1586 break;
1587 case VME_CRCSR:
1588 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1589 break;
1590 case VME_USER1:
1591 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1592 break;
1593 case VME_USER2:
1594 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1595 break;
1596 case VME_USER3:
1597 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1598 break;
1599 case VME_USER4:
1600 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1601 break;
1602 default:
1603 printk("Invalid address space\n");
1604 return -EINVAL;
1605 break;
1606 }
1607
1608 if (cycle & VME_SUPER)
1609 *attr |= TSI148_LCSR_DDAT_SUP;
1610 if (cycle & VME_PROG)
1611 *attr |= TSI148_LCSR_DDAT_PGM;
1612
1613 return 0;
1614 }
1615
1616 /*
1617 * Add a link list descriptor to the list
1618 */
1619 int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
1620 struct vme_dma_attr *dest, size_t count)
1621 {
1622 struct tsi148_dma_entry *entry, *prev;
1623 u32 address_high, address_low;
1624 struct vme_dma_pattern *pattern_attr;
1625 struct vme_dma_pci *pci_attr;
1626 struct vme_dma_vme *vme_attr;
1627 dma_addr_t desc_ptr;
1628 int retval = 0;
1629
1630 /* Descriptor must be aligned on 64-bit boundaries */
1631 entry = kmalloc(sizeof(struct tsi148_dma_entry),
1632 GFP_KERNEL);
1633 if (entry == NULL) {
1634 printk(KERN_ERR "Failed to allocate memory for dma resource "
1635 "structure\n");
1636 retval = -ENOMEM;
1637 goto err_mem;
1638 }
1639
1640 /* Test descriptor alignment */
1641 if ((unsigned long)&(entry->descriptor) & 0x7) {
1642 printk("Descriptor not aligned to 8 byte boundary as "
1643 "required: %p\n", &(entry->descriptor));
1644 retval = -EINVAL;
1645 goto err_align;
1646 }
1647
1648 /* Given we are going to fill out the structure, we probably don't
1649 * need to zero it, but better safe than sorry for now.
1650 */
1651 memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
1652
1653 /* Fill out source part */
1654 switch (src->type) {
1655 case VME_DMA_PATTERN:
1656 pattern_attr = (struct vme_dma_pattern *)src->private;
1657
1658 entry->descriptor.dsal = pattern_attr->pattern;
1659 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1660 /* Default behaviour is 32 bit pattern */
1661 if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
1662 entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1663 }
1664 /* It seems that the default behaviour is to increment */
1665 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
1666 entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
1667 }
1668 break;
1669 case VME_DMA_PCI:
1670 pci_attr = (struct vme_dma_pci *)src->private;
1671
1672 reg_split((unsigned long long)pci_attr->address, &address_high,
1673 &address_low);
1674 entry->descriptor.dsau = address_high;
1675 entry->descriptor.dsal = address_low;
1676 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
1677 break;
1678 case VME_DMA_VME:
1679 vme_attr = (struct vme_dma_vme *)src->private;
1680
1681 reg_split((unsigned long long)vme_attr->address, &address_high,
1682 &address_low);
1683 entry->descriptor.dsau = address_high;
1684 entry->descriptor.dsal = address_low;
1685 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1686
1687 retval = tsi148_dma_set_vme_src_attributes(
1688 &(entry->descriptor.dsat), vme_attr->aspace,
1689 vme_attr->cycle, vme_attr->dwidth);
1690 if (retval < 0)
1691 goto err_source;
1692 break;
1693 default:
1694 printk("Invalid source type\n");
1695 retval = -EINVAL;
1696 goto err_source;
1697 break;
1698 }
1699
1700 /* Assume last link - this will be over-written by adding another */
1701 entry->descriptor.dnlau = 0;
1702 entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1703
1704
1705 /* Fill out destination part */
1706 switch (dest->type) {
1707 case VME_DMA_PCI:
1708 pci_attr = (struct vme_dma_pci *)dest->private;
1709
1710 reg_split((unsigned long long)pci_attr->address, &address_high,
1711 &address_low);
1712 entry->descriptor.ddau = address_high;
1713 entry->descriptor.ddal = address_low;
1714 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1715 break;
1716 case VME_DMA_VME:
1717 vme_attr = (struct vme_dma_vme *)dest->private;
1718
1719 reg_split((unsigned long long)vme_attr->address, &address_high,
1720 &address_low);
1721 entry->descriptor.ddau = address_high;
1722 entry->descriptor.ddal = address_low;
1723 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1724
1725 retval = tsi148_dma_set_vme_dest_attributes(
1726 &(entry->descriptor.ddat), vme_attr->aspace,
1727 vme_attr->cycle, vme_attr->dwidth);
1728 if (retval < 0)
1729 goto err_dest;
1730 break;
1731 default:
1732 printk("Invalid destination type\n");
1733 retval = -EINVAL;
1734 goto err_dest;
1735 break;
1736 }
1737
1738 /* Fill out count */
1739 entry->descriptor.dcnt = (u32)count;
1740
1741 /* Add to list */
1742 list_add_tail(&(entry->list), &(list->entries));
1743
1744 /* Fill out previous descriptors "Next Address" */
1745 if (entry->list.prev != &(list->entries)) {
1746 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1747 list);
1748 /* We need the bus address for the pointer */
1749 desc_ptr = virt_to_bus(&(entry->descriptor));
1750 reg_split(desc_ptr, &(prev->descriptor.dnlau),
1751 &(prev->descriptor.dnlal));
1752 }
1753
1754 return 0;
1755
1756 err_dest:
1757 err_source:
1758 err_align:
1759 kfree(entry);
1760 err_mem:
1761 return retval;
1762 }
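/*
 * Each successful call above appends one hardware descriptor to the list:
 * the newest descriptor is terminated with TSI148_LCSR_DNLAL_LLA ("last
 * link"), and when a further entry is added the previous descriptor's
 * next-link words (dnlau/dnlal) are patched with the bus address of the new
 * descriptor obtained via virt_to_bus().
 */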
1763
1764 /*
1765 * Check to see if the provided DMA channel is busy: returns 0 if busy, 1 once idle.
1766 */
1767 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1768 {
1769 u32 tmp;
1770 struct tsi148_driver *bridge;
1771
1772 bridge = tsi148_bridge->driver_priv;
1773
1774 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1775 TSI148_LCSR_OFFSET_DSTA);
1776
1777 if (tmp & TSI148_LCSR_DSTA_BSY)
1778 return 0;
1779 else
1780 return 1;
1781
1782 }
1783
1784 /*
1785 * Execute a previously generated link list
1786 *
1787 * XXX Need to provide control register configuration.
1788 */
1789 int tsi148_dma_list_exec(struct vme_dma_list *list)
1790 {
1791 struct vme_dma_resource *ctrlr;
1792 int channel, retval = 0;
1793 struct tsi148_dma_entry *entry;
1794 dma_addr_t bus_addr;
1795 u32 bus_addr_high, bus_addr_low;
1796 u32 val, dctlreg = 0;
1797 struct tsi148_driver *bridge;
1798
1799 ctrlr = list->parent;
1800
1801 bridge = ctrlr->parent->driver_priv;
1802
1803 mutex_lock(&(ctrlr->mtx));
1804
1805 channel = ctrlr->number;
1806
1807 if (!list_empty(&(ctrlr->running))) {
1808 /*
1809 * XXX We have an active DMA transfer and currently haven't
1810 * sorted out the mechanism for "pending" DMA transfers.
1811 * Return busy.
1812 */
1813 /* Need to add to pending here */
1814 mutex_unlock(&(ctrlr->mtx));
1815 return -EBUSY;
1816 } else {
1817 list_add(&(list->list), &(ctrlr->running));
1818 }
1819
1820 /* Get first bus address and write into registers */
1821 entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
1822 list);
1823
1824 bus_addr = virt_to_bus(&(entry->descriptor));
1825
1826 mutex_unlock(&(ctrlr->mtx));
1827
1828 reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1829
1830 iowrite32be(bus_addr_high, bridge->base +
1831 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1832 iowrite32be(bus_addr_low, bridge->base +
1833 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1834
1835 /* Start the operation */
1836 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1837 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1838
1839 wait_event_interruptible(bridge->dma_queue[channel],
1840 tsi148_dma_busy(ctrlr->parent, channel));
1841 /*
1842 * Read status register, this register is valid until we kick off a
1843 * new transfer.
1844 */
1845 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1846 TSI148_LCSR_OFFSET_DSTA);
1847
1848 if (val & TSI148_LCSR_DSTA_VBE) {
1849 printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
1850 retval = -EIO;
1851 }
1852
1853 /* Remove list from running list */
1854 mutex_lock(&(ctrlr->mtx));
1855 list_del(&(list->list));
1856 mutex_unlock(&(ctrlr->mtx));
1857
1858 return retval;
1859 }
1860
1861 /*
1862 * Clean up a previously generated link list
1863 *
1864 * This is a separate function; don't assume that the chain can't be reused.
1865 */
1866 int tsi148_dma_list_empty(struct vme_dma_list *list)
1867 {
1868 struct list_head *pos, *temp;
1869 struct tsi148_dma_entry *entry;
1870
1871 /* detach and free each entry */
1872 list_for_each_safe(pos, temp, &(list->entries)) {
1873 list_del(pos);
1874 entry = list_entry(pos, struct tsi148_dma_entry, list);
1875 kfree(entry);
1876 }
1877
1878 return 0;
1879 }
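/*
 * Typical life cycle of a DMA list (a sketch of how the three functions
 * above fit together, not a prescribed sequence): build a chain with
 * repeated tsi148_dma_list_add() calls, run it with tsi148_dma_list_exec(),
 * and release the descriptors with tsi148_dma_list_empty() once the chain
 * is no longer needed - execution does not consume the chain, so it may be
 * re-run before being emptied.
 */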
1880
1881 /*
1882 * All 4 location monitors reside at the same base - this is therefore a
1883 * system wide configuration.
1884 *
1885 * This does not enable the LM monitor - that should be done when the first
1886 * callback is attached and disabled when the last callback is removed.
1887 */
1888 int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1889 vme_address_t aspace, vme_cycle_t cycle)
1890 {
1891 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1892 int i;
1893 struct tsi148_driver *bridge;
1894
1895 bridge = lm->parent->driver_priv;
1896
1897 mutex_lock(&(lm->mtx));
1898
1899 /* If we already have a callback attached, we can't move it! */
1900 for (i = 0; i < lm->monitors; i++) {
1901 if (bridge->lm_callback[i] != NULL) {
1902 mutex_unlock(&(lm->mtx));
1903 printk("Location monitor callback attached, can't "
1904 "reset\n");
1905 return -EBUSY;
1906 }
1907 }
1908
1909 switch (aspace) {
1910 case VME_A16:
1911 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1912 break;
1913 case VME_A24:
1914 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1915 break;
1916 case VME_A32:
1917 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1918 break;
1919 case VME_A64:
1920 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1921 break;
1922 default:
1923 mutex_unlock(&(lm->mtx));
1924 printk("Invalid address space\n");
1925 return -EINVAL;
1926 break;
1927 }
1928
1929 if (cycle & VME_SUPER)
1930 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1931 if (cycle & VME_USER)
1932 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1933 if (cycle & VME_PROG)
1934 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1935 if (cycle & VME_DATA)
1936 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1937
1938 reg_split(lm_base, &lm_base_high, &lm_base_low);
1939
1940 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1941 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1942 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1943
1944 mutex_unlock(&(lm->mtx));
1945
1946 return 0;
1947 }
1948
1949 /* Get configuration of the location monitor and return whether it is enabled
1950 * or disabled.
1951 */
1952 int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
1953 vme_address_t *aspace, vme_cycle_t *cycle)
1954 {
1955 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1956 struct tsi148_driver *bridge;
1957
1958 bridge = lm->parent->driver_priv;
1959
1960 mutex_lock(&(lm->mtx));
1961
1962 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1963 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1964 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1965
1966 reg_join(lm_base_high, lm_base_low, lm_base);
1967
1968 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1969 enabled = 1;
1970
1971 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
1972 *aspace |= VME_A16;
1973 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
1974 *aspace |= VME_A24;
1975 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
1976 *aspace |= VME_A32;
1977 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
1978 *aspace |= VME_A64;
1983
1984 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
1985 *cycle |= VME_SUPER;
1986 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
1987 *cycle |= VME_USER;
1988 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
1989 *cycle |= VME_PROG;
1990 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
1991 *cycle |= VME_DATA;
1992
1993 mutex_unlock(&(lm->mtx));
1994
1995 return enabled;
1996 }
1997
1998 /*
1999 * Attach a callback to a specific location monitor.
2000 *
2001 * The callback will be passed the number of the monitor that triggered.
2002 */
2003 int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2004 void (*callback)(int))
2005 {
2006 u32 lm_ctl, tmp;
2007 struct tsi148_driver *bridge;
2008
2009 bridge = lm->parent->driver_priv;
2010
2011 mutex_lock(&(lm->mtx));
2012
2013 /* Ensure that the location monitor is configured - need PGM or DATA */
2014 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2015 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2016 mutex_unlock(&(lm->mtx));
2017 printk("Location monitor not properly configured\n");
2018 return -EINVAL;
2019 }
2020
2021 /* Check that a callback isn't already attached */
2022 if (bridge->lm_callback[monitor] != NULL) {
2023 mutex_unlock(&(lm->mtx));
2024 printk("Existing callback attached\n");
2025 return -EBUSY;
2026 }
2027
2028 /* Attach callback */
2029 bridge->lm_callback[monitor] = callback;
2030
2031 /* Enable Location Monitor interrupt */
2032 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2033 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2034 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2035
2036 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2037 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2038 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2039
2040 /* Ensure that the global Location Monitor Enable bit is set */
2041 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2042 lm_ctl |= TSI148_LCSR_LMAT_EN;
2043 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2044 }
2045
2046 mutex_unlock(&(lm->mtx));
2047
2048 return 0;
2049 }
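/*
 * Rough usage sketch (illustrative only): a consumer requests a location
 * monitor resource from the VME core (vme_lm_request() in ../vme.c) and
 * then drives it roughly as follows:
 *
 *	vme_lm_set(lm, 0x60000000, VME_A32, VME_USER | VME_DATA);
 *	vme_lm_attach(lm, 0, my_callback);	- my_callback(0) runs on a hit
 *	...
 *	vme_lm_detach(lm, 0);
 *	vme_lm_free(lm);
 */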
2050
2051 /*
2052 * Detach a callback function from a specific location monitor.
2053 */
2054 int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2055 {
2056 u32 lm_en, tmp;
2057 struct tsi148_driver *bridge;
2058
2059 bridge = lm->parent->driver_priv;
2060
2061 mutex_lock(&(lm->mtx));
2062
2063 /* Disable Location Monitor and ensure previous interrupts are clear */
2064 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2065 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2066 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2067
2068 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2069 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2070 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2071
2072 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2073 bridge->base + TSI148_LCSR_INTC);
2074
2075 /* Detach callback */
2076 bridge->lm_callback[monitor] = NULL;
2077
2078 /* If all location monitors are disabled, disable the global Location Monitor */
2079 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2080 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2081 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2082 tmp &= ~TSI148_LCSR_LMAT_EN;
2083 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2084 }
2085
2086 mutex_unlock(&(lm->mtx));
2087
2088 return 0;
2089 }
2090
2091 /*
2092 * Determine Geographical Addressing
2093 */
2094 int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2095 {
2096 u32 slot = 0;
2097 struct tsi148_driver *bridge;
2098
2099 bridge = tsi148_bridge->driver_priv;
2100
2101 if (!geoid) {
2102 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2103 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2104 } else
2105 slot = geoid;
2106
2107 return (int)slot;
2108 }
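/*
 * Note: a non-zero "geoid" module parameter overrides the geographical
 * address latched from the backplane GA pins in the VSTAT register.
 */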
2109
2110 static int __init tsi148_init(void)
2111 {
2112 return pci_register_driver(&tsi148_driver);
2113 }
2114
2115 /*
2116 * Configure CR/CSR space
2117 *
2118 * Access to the CR/CSR can be configured at power-up. The location of the
2119 * CR/CSR registers in the CR/CSR address space is determined by the board's
2120 * Auto-ID or Geographic address. This function ensures that the window is
2121 * enabled at an offset consistent with the board's geographic address.
2122 *
2123 * Each board has a 512kB window, with the highest 4kB being used for the
2124 * board's registers; this leaves a fixed 508kB window which must be mapped
2125 * onto PCI memory.
2126 */
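/*
 * For example, a board with geographical address 3 answers CR/CSR accesses
 * in the 512kB block starting at 3 * 0x80000 = 0x180000; its own register
 * set (CRG) occupies the top 4kB of that block.
 */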
2127 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2128 struct pci_dev *pdev)
2129 {
2130 u32 cbar, crat, vstat;
2131 u32 crcsr_bus_high, crcsr_bus_low;
2132 int retval;
2133 struct tsi148_driver *bridge;
2134
2135 bridge = tsi148_bridge->driver_priv;
2136
2137 /* Allocate mem for CR/CSR image */
2138 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2139 &(bridge->crcsr_bus));
2140 if (bridge->crcsr_kernel == NULL) {
2141 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2142 "image\n");
2143 return -ENOMEM;
2144 }
2145
2146 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2147
2148 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2149
2150 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2151 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2152
2153 /* Ensure that the CR/CSR is configured at the correct offset */
2154 cbar = ioread32be(bridge->base + TSI148_CBAR);
2155 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2156
2157 vstat = tsi148_slot_get(tsi148_bridge);
2158
2159 if (cbar != vstat) {
2160 cbar = vstat;
2161 dev_info(&pdev->dev, "Setting CR/CSR offset\n");
2162 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2163 }
2164 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
2165
2166 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2167 if (crat & TSI148_LCSR_CRAT_EN) {
2168 dev_info(&pdev->dev, "CR/CSR already enabled\n");
2169 } else {
2170 dev_info(&pdev->dev, "Enabling CR/CSR space\n");
2171 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2172 bridge->base + TSI148_LCSR_CRAT);
2173 }
2173
2174 /* If we want flushed, error-checked writes, set up a window
2175 * over the CR/CSR registers. We read from here to safely flush
2176 * through VME writes.
2177 */
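/*
 * The window used here is the one reserved in tsi148_probe() as
 * "flush_image" (outbound window number TSI148_MAX_MASTER - 1), pointed
 * at this board's own 512kB CR/CSR block.
 */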
2178 if (err_chk) {
2179 retval = tsi148_master_set(bridge->flush_image, 1,
2180 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2181 VME_D16);
2182 if (retval)
2183 dev_err(&pdev->dev, "Configuring flush image failed\n");
2184 }
2185
2186 return 0;
2187
2188 }
2189
2190 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2191 struct pci_dev *pdev)
2192 {
2193 u32 crat;
2194 struct tsi148_driver *bridge;
2195
2196 bridge = tsi148_bridge->driver_priv;
2197
2198 /* Turn off CR/CSR space */
2199 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2200 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2201 bridge->base + TSI148_LCSR_CRAT);
2202
2203 /* Free image */
2204 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2205 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2206
2207 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2208 bridge->crcsr_bus);
2209 }
2210
2211 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2212 {
2213 int retval, i, master_num;
2214 u32 data;
2215 struct list_head *pos = NULL, *n;
2216 struct vme_bridge *tsi148_bridge;
2217 struct tsi148_driver *tsi148_device;
2218 struct vme_master_resource *master_image;
2219 struct vme_slave_resource *slave_image;
2220 struct vme_dma_resource *dma_ctrlr;
2221 struct vme_lm_resource *lm;
2222
2223 /* If we want to support more than one of each bridge, we need to
2224 * dynamically generate this so we get one per device
2225 */
2226 tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2227 if (tsi148_bridge == NULL) {
2228 dev_err(&pdev->dev, "Failed to allocate memory for device "
2229 "structure\n");
2230 retval = -ENOMEM;
2231 goto err_struct;
2232 }
2236
2237 tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2238 if (tsi148_device == NULL) {
2239 dev_err(&pdev->dev, "Failed to allocate memory for device "
2240 "structure\n");
2241 retval = -ENOMEM;
2242 goto err_driver;
2243 }
2246
2247 tsi148_bridge->driver_priv = tsi148_device;
2248
2249 /* Enable the device */
2250 retval = pci_enable_device(pdev);
2251 if (retval) {
2252 dev_err(&pdev->dev, "Unable to enable device\n");
2253 goto err_enable;
2254 }
2255
2256 /* Map Registers */
2257 retval = pci_request_regions(pdev, driver_name);
2258 if (retval) {
2259 dev_err(&pdev->dev, "Unable to reserve resources\n");
2260 goto err_resource;
2261 }
2262
2263 /* map registers in BAR 0 */
2264 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2265 4096);
2266 if (!tsi148_device->base) {
2267 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2268 retval = -EIO;
2269 goto err_remap;
2270 }
2271
2272 /* Check to see if the mapping worked out */
2273 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2274 if (data != PCI_VENDOR_ID_TUNDRA) {
2275 dev_err(&pdev->dev, "CRG region check failed\n");
2276 retval = -EIO;
2277 goto err_test;
2278 }
2279
2280 /* Initialize wait queues & mutual exclusion flags */
2281 init_waitqueue_head(&(tsi148_device->dma_queue[0]));
2282 init_waitqueue_head(&(tsi148_device->dma_queue[1]));
2283 init_waitqueue_head(&(tsi148_device->iack_queue));
2284 mutex_init(&(tsi148_device->vme_int));
2285 mutex_init(&(tsi148_device->vme_rmw));
2286
2287 tsi148_bridge->parent = &(pdev->dev);
2288 strcpy(tsi148_bridge->name, driver_name);
2289
2290 /* Setup IRQ */
2291 retval = tsi148_irq_init(tsi148_bridge);
2292 if (retval != 0) {
2293 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2294 goto err_irq;
2295 }
2296
2297 /* If we are going to flush writes, we need to read from the VME bus.
2298 * We need to do this safely, thus we read the device's own CR/CSR
2299 * register. To do this we must set up a window in CR/CSR space and
2300 * hence have one less master window resource available.
2301 */
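/*
 * The reserved window is only allocated below; it is pointed at this
 * board's CR/CSR block later, from tsi148_crcsr_init().
 */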
2302 master_num = TSI148_MAX_MASTER;
2303 if (err_chk) {
2304 master_num--;
2305
2306 tsi148_device->flush_image =
2307 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2308 if (tsi148_device->flush_image == NULL) {
2309 dev_err(&pdev->dev, "Failed to allocate memory for "
2310 "flush resource structure\n");
2311 retval = -ENOMEM;
2312 goto err_master;
2313 }
2314 tsi148_device->flush_image->parent = tsi148_bridge;
2315 spin_lock_init(&(tsi148_device->flush_image->lock));
2316 tsi148_device->flush_image->locked = 1;
2317 tsi148_device->flush_image->number = master_num;
2318 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2319 VME_A32 | VME_A64;
2320 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2321 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2322 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2323 VME_USER | VME_PROG | VME_DATA;
2324 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2325 memset(&(tsi148_device->flush_image->bus_resource), 0,
2326 sizeof(struct resource));
2327 tsi148_device->flush_image->kern_base = NULL;
2328 }
2329
2330 /* Add master windows to list */
2331 INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
2332 for (i = 0; i < master_num; i++) {
2333 master_image = kmalloc(sizeof(struct vme_master_resource),
2334 GFP_KERNEL);
2335 if (master_image == NULL) {
2336 dev_err(&pdev->dev, "Failed to allocate memory for "
2337 "master resource structure\n");
2338 retval = -ENOMEM;
2339 goto err_master;
2340 }
2341 master_image->parent = tsi148_bridge;
2342 spin_lock_init(&(master_image->lock));
2343 master_image->locked = 0;
2344 master_image->number = i;
2345 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2346 VME_A64;
2347 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2348 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2349 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2350 VME_PROG | VME_DATA;
2351 master_image->width_attr = VME_D16 | VME_D32;
2352 memset(&(master_image->bus_resource), 0,
2353 sizeof(struct resource));
2354 master_image->kern_base = NULL;
2355 list_add_tail(&(master_image->list),
2356 &(tsi148_bridge->master_resources));
2357 }
2358
2359 /* Add slave windows to list */
2360 INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
2361 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2362 slave_image = kmalloc(sizeof(struct vme_slave_resource),
2363 GFP_KERNEL);
2364 if (slave_image == NULL) {
2365 dev_err(&pdev->dev, "Failed to allocate memory for "
2366 "slave resource structure\n");
2367 retval = -ENOMEM;
2368 goto err_slave;
2369 }
2370 slave_image->parent = tsi148_bridge;
2371 mutex_init(&(slave_image->mtx));
2372 slave_image->locked = 0;
2373 slave_image->number = i;
2374 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2375 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2376 VME_USER3 | VME_USER4;
2377 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2378 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2379 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2380 VME_PROG | VME_DATA;
2381 list_add_tail(&(slave_image->list),
2382 &(tsi148_bridge->slave_resources));
2383 }
2384
2385 /* Add dma engines to list */
2386 INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
2387 for (i = 0; i < TSI148_MAX_DMA; i++) {
2388 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2389 GFP_KERNEL);
2390 if (dma_ctrlr == NULL) {
2391 dev_err(&pdev->dev, "Failed to allocate memory for "
2392 "dma resource structure\n");
2393 retval = -ENOMEM;
2394 goto err_dma;
2395 }
2396 dma_ctrlr->parent = tsi148_bridge;
2397 mutex_init(&(dma_ctrlr->mtx));
2398 dma_ctrlr->locked = 0;
2399 dma_ctrlr->number = i;
2400 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2401 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2402 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2403 VME_DMA_PATTERN_TO_MEM;
2404 INIT_LIST_HEAD(&(dma_ctrlr->pending));
2405 INIT_LIST_HEAD(&(dma_ctrlr->running));
2406 list_add_tail(&(dma_ctrlr->list),
2407 &(tsi148_bridge->dma_resources));
2408 }
2409
2410 /* Add location monitor to list */
2411 INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
2412 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2413 if (lm == NULL) {
2414 dev_err(&pdev->dev, "Failed to allocate memory for "
2415 "location monitor resource structure\n");
2416 retval = -ENOMEM;
2417 goto err_lm;
2418 }
2419 lm->parent = tsi148_bridge;
2420 mutex_init(&(lm->mtx));
2421 lm->locked = 0;
2422 lm->number = 1;
2423 lm->monitors = 4;
2424 list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
2425
2426 tsi148_bridge->slave_get = tsi148_slave_get;
2427 tsi148_bridge->slave_set = tsi148_slave_set;
2428 tsi148_bridge->master_get = tsi148_master_get;
2429 tsi148_bridge->master_set = tsi148_master_set;
2430 tsi148_bridge->master_read = tsi148_master_read;
2431 tsi148_bridge->master_write = tsi148_master_write;
2432 tsi148_bridge->master_rmw = tsi148_master_rmw;
2433 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2434 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2435 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2436 tsi148_bridge->irq_set = tsi148_irq_set;
2437 tsi148_bridge->irq_generate = tsi148_irq_generate;
2438 tsi148_bridge->lm_set = tsi148_lm_set;
2439 tsi148_bridge->lm_get = tsi148_lm_get;
2440 tsi148_bridge->lm_attach = tsi148_lm_attach;
2441 tsi148_bridge->lm_detach = tsi148_lm_detach;
2442 tsi148_bridge->slot_get = tsi148_slot_get;
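/*
 * These operations are invoked through the VME core (../vme.c) on behalf of
 * VME device drivers once the bridge is registered below.
 */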
2443
2444 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2445 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2446 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2447 if (!geoid)
2448 dev_info(&pdev->dev, "VME geographical address is %d\n",
2449 data & TSI148_LCSR_VSTAT_GA_M);
2450 else
2451 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2452 geoid);
2453
2454 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2455 err_chk ? "enabled" : "disabled");
2456
2457 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2458 if (retval) {
2459 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2460 goto err_crcsr;
2461 }
2460
2461 retval = vme_register_bridge(tsi148_bridge);
2462 if (retval != 0) {
2463 dev_err(&pdev->dev, "Chip Registration failed.\n");
2464 goto err_reg;
2465 }
2466
2467 pci_set_drvdata(pdev, tsi148_bridge);
2468
2469 /* Clear VME bus "board fail" and "power-up reset" lines */
2470 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2471 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2472 data |= TSI148_LCSR_VSTAT_CPURST;
2473 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2474
2475 return 0;
2476
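/*
 * Error unwind: each label below releases the resources acquired before the
 * corresponding failure point, in reverse order of acquisition.
 */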
2477 vme_unregister_bridge(tsi148_bridge);
2478 err_reg:
2479 tsi148_crcsr_exit(tsi148_bridge, pdev);
2480 err_crcsr:
2481 err_lm:
2482 /* resources are stored in a linked list */
2483 list_for_each_safe(pos, n, &(tsi148_bridge->lm_resources)) {
2484 lm = list_entry(pos, struct vme_lm_resource, list);
2485 list_del(pos);
2486 kfree(lm);
2487 }
2488 err_dma:
2489 /* resources are stored in a linked list */
2490 list_for_each_safe(pos, n, &(tsi148_bridge->dma_resources)) {
2491 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2492 list_del(pos);
2493 kfree(dma_ctrlr);
2494 }
2495 err_slave:
2496 /* resources are stored in a linked list */
2497 list_for_each_safe(pos, n, &(tsi148_bridge->slave_resources)) {
2498 slave_image = list_entry(pos, struct vme_slave_resource, list);
2499 list_del(pos);
2500 kfree(slave_image);
2501 }
2502 err_master:
2503 /* resources are stored in a linked list */
2504 list_for_each_safe(pos, n, &(tsi148_bridge->master_resources)) {
2505 master_image = list_entry(pos, struct vme_master_resource, list);
2506 list_del(pos);
2507 kfree(master_image);
2508 }
2509
2510 tsi148_irq_exit(tsi148_device, pdev);
2511 err_irq:
2512 err_test:
2513 iounmap(tsi148_device->base);
2514 err_remap:
2515 pci_release_regions(pdev);
2516 err_resource:
2517 pci_disable_device(pdev);
2518 err_enable:
2519 kfree(tsi148_device);
2520 err_driver:
2521 kfree(tsi148_bridge);
2522 err_struct:
2523 return retval;
2524
2525 }
2526
2527 static void tsi148_remove(struct pci_dev *pdev)
2528 {
2529 struct list_head *pos = NULL, *n;
2530 struct vme_master_resource *master_image;
2531 struct vme_slave_resource *slave_image;
2532 struct vme_dma_resource *dma_ctrlr;
2533 int i;
2534 struct tsi148_driver *bridge;
2535 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2536
2537 bridge = tsi148_bridge->driver_priv;
2538
2539
2540 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2541
2542 /*
2543 * Shutdown all inbound and outbound windows.
2544 */
2545 for (i = 0; i < 8; i++) {
2546 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2547 TSI148_LCSR_OFFSET_ITAT);
2548 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2549 TSI148_LCSR_OFFSET_OTAT);
2550 }
2551
2552 /*
2553 * Shutdown Location monitor.
2554 */
2555 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2556
2557 /*
2558 * Shutdown CRG map.
2559 */
2560 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2561
2562 /*
2563 * Clear error status.
2564 */
2565 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2566 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2567 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2568
2569 /*
2570 * Remove VIRQ interrupt (if any)
2571 */
2572 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2573 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2574
2575 /*
2576 * Map all Interrupts to PCI INTA
2577 */
2578 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2579 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2580
2581 tsi148_irq_exit(bridge, pdev);
2582
2583 vme_unregister_bridge(tsi148_bridge);
2584
2585 tsi148_crcsr_exit(tsi148_bridge, pdev);
2586
2587 /* resources are stored in a linked list */
2588 list_for_each_safe(pos, n, &(tsi148_bridge->dma_resources)) {
2589 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2590 list_del(pos);
2591 kfree(dma_ctrlr);
2592 }
2593
2594 /* resources are stored in a linked list */
2595 list_for_each_safe(pos, n, &(tsi148_bridge->slave_resources)) {
2596 slave_image = list_entry(pos, struct vme_slave_resource, list);
2597 list_del(pos);
2598 kfree(slave_image);
2599 }
2600
2601 /* resources are stored in a linked list */
2602 list_for_each_safe(pos, n, &(tsi148_bridge->master_resources)) {
2603 master_image = list_entry(pos, struct vme_master_resource,
2604 list);
2605 list_del(pos);
2606 kfree(master_image);
2607 }
2610
2611 iounmap(bridge->base);
2612
2613 pci_release_regions(pdev);
2614
2615 pci_disable_device(pdev);
2616
2617 kfree(tsi148_bridge->driver_priv);
2618
2619 kfree(tsi148_bridge);
2620 }
2621
2622 static void __exit tsi148_exit(void)
2623 {
2624 pci_unregister_driver(&tsi148_driver);
2625
2626 printk(KERN_DEBUG "Driver removed.\n");
2627 }
2628
2629 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2630 module_param(err_chk, bool, 0);
2631
2632 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2633 module_param(geoid, int, 0);
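
/*
 * Example (hypothetical values): load the driver with error-checked writes
 * enabled and the geographical address forced to 4:
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=4
 */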
2634
2635 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2636 MODULE_LICENSE("GPL");
2637
2638 module_init(tsi148_init);
2639 module_exit(tsi148_exit);