1 /*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32
33 #include "../vme.h"
34 #include "../vme_bridge.h"
35 #include "vme_tsi148.h"
36
37 static int __init tsi148_init(void);
38 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
39 static void tsi148_remove(struct pci_dev *);
40 static void __exit tsi148_exit(void);
41
42
43 /* Module parameters */
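/* err_chk: when set, master reads and writes check the recorded VME bus
 * errors for the accessed range (see tsi148_master_read/write below). */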
44 static int err_chk;
45 static int geoid;
46
47 static char driver_name[] = "vme_tsi148";
48
49 static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
50 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
51 { },
52 };
53
54 static struct pci_driver tsi148_driver = {
55 .name = driver_name,
56 .id_table = tsi148_ids,
57 .probe = tsi148_probe,
58 .remove = tsi148_remove,
59 };
60
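/* Many TSI148 registers hold 64-bit addresses as separate upper/lower 32-bit
 * halves; these helpers combine and split such register pairs, e.g.
 * reg_join(hi, lo, &addr) and reg_split(addr, &hi, &lo).
 */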
61 static void reg_join(unsigned int high, unsigned int low,
62 unsigned long long *variable)
63 {
64 *variable = (unsigned long long)high << 32;
65 *variable |= (unsigned long long)low;
66 }
67
68 static void reg_split(unsigned long long variable, unsigned int *high,
69 unsigned int *low)
70 {
71 *low = (unsigned int)variable & 0xFFFFFFFF;
72 *high = (unsigned int)(variable >> 32);
73 }
74
75 /*
76 * Wakes up DMA queue.
77 */
78 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
79 int channel_mask)
80 {
81 u32 serviced = 0;
82
83 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
84 wake_up(&bridge->dma_queue[0]);
85 serviced |= TSI148_LCSR_INTC_DMA0C;
86 }
87 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
88 wake_up(&bridge->dma_queue[1]);
89 serviced |= TSI148_LCSR_INTC_DMA1C;
90 }
91
92 return serviced;
93 }
94
95 /*
96 * Wake up location monitor queue
97 */
98 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
99 {
100 int i;
101 u32 serviced = 0;
102
103 for (i = 0; i < 4; i++) {
104 if (stat & TSI148_LCSR_INTS_LMS[i]) {
105 /* We only enable interrupts if the callback is set */
106 bridge->lm_callback[i](i);
107 serviced |= TSI148_LCSR_INTC_LMC[i];
108 }
109 }
110
111 return serviced;
112 }
113
114 /*
115 * Wake up mail box queue.
116 *
117 * XXX This functionality is not exposed up through the API.
118 */
119 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
120 {
121 int i;
122 u32 val;
123 u32 serviced = 0;
124 struct tsi148_driver *bridge;
125
126 bridge = tsi148_bridge->driver_priv;
127
128 for (i = 0; i < 4; i++) {
129 if (stat & TSI148_LCSR_INTS_MBS[i]) {
130 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
131 dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
132 ": 0x%x\n", i, val);
133 serviced |= TSI148_LCSR_INTC_MBC[i];
134 }
135 }
136
137 return serviced;
138 }
139
140 /*
141 * Display error & status message when PERR (PCI) exception interrupt occurs.
142 */
143 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
144 {
145 struct tsi148_driver *bridge;
146
147 bridge = tsi148_bridge->driver_priv;
148
149 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
150 "attributes: %08x\n",
151 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
152 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
153 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
154
155 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
156 "completion reg: %08x\n",
157 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
158 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
159
160 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
161
162 return TSI148_LCSR_INTC_PERRC;
163 }
164
165 /*
166 * Save address and status when VME error interrupt occurs.
167 */
168 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
169 {
170 unsigned int error_addr_high, error_addr_low;
171 unsigned long long error_addr;
172 u32 error_attrib;
173 struct vme_bus_error *error;
174 struct tsi148_driver *bridge;
175
176 bridge = tsi148_bridge->driver_priv;
177
178 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
179 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
180 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
181
182 reg_join(error_addr_high, error_addr_low, &error_addr);
183
184 /* Check for exception register overflow (we have lost error data) */
185 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
186 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
187 "Occurred\n");
188 }
189
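/* Called from the interrupt handler, so this allocation must not sleep */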
190 error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
191 if (error) {
192 error->address = error_addr;
193 error->attributes = error_attrib;
194 list_add_tail(&error->list, &tsi148_bridge->vme_errors);
195 } else {
196 dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
197 "VMEbus Error reporting\n");
198 dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
199 "0x%llx, attributes: %08x\n", error_addr, error_attrib);
200 }
201
202 /* Clear Status */
203 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
204
205 return TSI148_LCSR_INTC_VERRC;
206 }
207
208 /*
209 * Wake up IACK queue.
210 */
211 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
212 {
213 wake_up(&bridge->iack_queue);
214
215 return TSI148_LCSR_INTC_IACKC;
216 }
217
218 /*
219 * Calling VME bus interrupt callback if provided.
220 */
221 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
222 u32 stat)
223 {
224 int vec, i, serviced = 0;
225 struct tsi148_driver *bridge;
226
227 bridge = tsi148_bridge->driver_priv;
228
229 for (i = 7; i > 0; i--) {
230 if (stat & (1 << i)) {
231 /*
232 * Note: Even though the registers are defined as
233 * 32-bits in the spec, we only want to issue 8-bit
234 * IACK cycles on the bus, read from offset 3.
235 */
236 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
237
238 vme_irq_handler(tsi148_bridge, i, vec);
239
240 serviced |= (1 << i);
241 }
242 }
243
244 return serviced;
245 }
246
247 /*
248 * Top level interrupt handler. Calls the appropriate sub handler(s) and then
249 * clears the serviced interrupt status bits.
250 */
251 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
252 {
253 u32 stat, enable, serviced = 0;
254 struct vme_bridge *tsi148_bridge;
255 struct tsi148_driver *bridge;
256
257 tsi148_bridge = ptr;
258
259 bridge = tsi148_bridge->driver_priv;
260
261 /* Determine which interrupts are unmasked and set */
262 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
263 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
264
265 /* Only look at unmasked interrupts */
266 stat &= enable;
267
268 if (unlikely(!stat))
269 return IRQ_NONE;
270
271 /* Call subhandlers as appropriate */
272 /* DMA irqs */
273 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
274 serviced |= tsi148_DMA_irqhandler(bridge, stat);
275
276 /* Location monitor irqs */
277 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
278 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
279 serviced |= tsi148_LM_irqhandler(bridge, stat);
280
281 /* Mail box irqs */
282 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
283 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
284 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
285
286 /* PCI bus error */
287 if (stat & TSI148_LCSR_INTS_PERRS)
288 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
289
290 /* VME bus error */
291 if (stat & TSI148_LCSR_INTS_VERRS)
292 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
293
294 /* IACK irq */
295 if (stat & TSI148_LCSR_INTS_IACKS)
296 serviced |= tsi148_IACK_irqhandler(bridge);
297
298 /* VME bus irqs */
299 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
300 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
301 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
302 TSI148_LCSR_INTS_IRQ1S))
303 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
304
305 /* Clear serviced interrupts */
306 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
307
308 return IRQ_HANDLED;
309 }
310
311 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
312 {
313 int result;
314 unsigned int tmp;
315 struct pci_dev *pdev;
316 struct tsi148_driver *bridge;
317
318 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
319
320 bridge = tsi148_bridge->driver_priv;
321
322 /* Initialise list for VME bus errors */
323 INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
324
325 mutex_init(&tsi148_bridge->irq_mtx);
326
327 result = request_irq(pdev->irq,
328 tsi148_irqhandler,
329 IRQF_SHARED,
330 driver_name, tsi148_bridge);
331 if (result) {
332 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
333 "vector %02X\n", pdev->irq);
334 return result;
335 }
336
337 /* Enable and unmask interrupts */
338 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
339 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
340 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
341 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
342 TSI148_LCSR_INTEO_IACKEO;
343
344 /* This leaves the following interrupts masked.
345 * TSI148_LCSR_INTEO_VIEEO
346 * TSI148_LCSR_INTEO_SYSFLEO
347 * TSI148_LCSR_INTEO_ACFLEO
348 */
349
350 /* Don't enable Location Monitor interrupts here - they will be
351 * enabled when the location monitors are properly configured and
352 * a callback has been attached.
353 * TSI148_LCSR_INTEO_LM0EO
354 * TSI148_LCSR_INTEO_LM1EO
355 * TSI148_LCSR_INTEO_LM2EO
356 * TSI148_LCSR_INTEO_LM3EO
357 */
358
359 /* Don't enable VME interrupts until we add a handler, else the board
360 * will respond to it and we don't want that unless it knows how to
361 * properly deal with it.
362 * TSI148_LCSR_INTEO_IRQ7EO
363 * TSI148_LCSR_INTEO_IRQ6EO
364 * TSI148_LCSR_INTEO_IRQ5EO
365 * TSI148_LCSR_INTEO_IRQ4EO
366 * TSI148_LCSR_INTEO_IRQ3EO
367 * TSI148_LCSR_INTEO_IRQ2EO
368 * TSI148_LCSR_INTEO_IRQ1EO
369 */
370
371 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
372 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
373
374 return 0;
375 }
376
377 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
378 struct pci_dev *pdev)
379 {
380 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
381
382 /* Turn off interrupts */
383 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
384 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
385
386 /* Clear all interrupts */
387 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
388
389 /* Detach interrupt handler */
390 free_irq(pdev->irq, tsi148_bridge);
391 }
392
393 /*
394 * Check to see if an IACK has been received, return true (1) or false (0).
395 */
396 static int tsi148_iack_received(struct tsi148_driver *bridge)
397 {
398 u32 tmp;
399
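/* The VICR IRQS bit stays set while the interrupt we generated is still
 * outstanding and clears once it has been acknowledged */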
400 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
401
402 if (tmp & TSI148_LCSR_VICR_IRQS)
403 return 0;
404 else
405 return 1;
406 }
407
408 /*
409 * Configure VME interrupt
410 */
411 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
412 int state, int sync)
413 {
414 struct pci_dev *pdev;
415 u32 tmp;
416 struct tsi148_driver *bridge;
417
418 bridge = tsi148_bridge->driver_priv;
419
420 /* We need to do the ordering differently for enabling and disabling */
421 if (state == 0) {
422 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
423 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
424 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
425
426 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
427 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
428 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
429
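/* If requested, wait for any handler still running on another CPU to
 * finish before returning; the level is now masked */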
430 if (sync != 0) {
431 pdev = container_of(tsi148_bridge->parent,
432 struct pci_dev, dev);
433
434 synchronize_irq(pdev->irq);
435 }
436 } else {
437 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
438 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
439 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
440
441 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
442 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
443 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
444 }
445 }
446
447 /*
448 * Generate a VME bus interrupt at the requested level & vector. Wait for
449 * interrupt to be acked.
450 */
451 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
452 int statid)
453 {
454 u32 tmp;
455 struct tsi148_driver *bridge;
456
457 bridge = tsi148_bridge->driver_priv;
458
459 mutex_lock(&bridge->vme_int);
460
461 /* Read VICR register */
462 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
463
464 /* Set Status/ID */
465 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
466 (statid & TSI148_LCSR_VICR_STID_M);
467 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
468
469 /* Assert VMEbus IRQ */
470 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
471 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
472
473 /* XXX Consider implementing a timeout? */
474 wait_event_interruptible(bridge->iack_queue,
475 tsi148_iack_received(bridge));
476
477 mutex_unlock(&bridge->vme_int);
478
479 return 0;
480 }
481
482 /*
483 * Find the first error in this address range
484 */
485 static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
486 vme_address_t aspace, unsigned long long address, size_t count)
487 {
488 struct list_head *err_pos;
489 struct vme_bus_error *vme_err, *valid = NULL;
490 unsigned long long bound;
491
492 bound = address + count;
493
494 /*
495 * XXX We are currently not looking at the address space when parsing
496 * for errors. This is because parsing the Address Modifier Codes
497 * is going to be quite resource intensive to do properly. We
498 * should be OK just looking at the addresses and this is certainly
499 * much better than what we had before.
500 */
501 err_pos = NULL;
502 /* Iterate through errors */
503 list_for_each(err_pos, &tsi148_bridge->vme_errors) {
504 vme_err = list_entry(err_pos, struct vme_bus_error, list);
505 if ((vme_err->address >= address) &&
506 (vme_err->address < bound)) {
507
508 valid = vme_err;
509 break;
510 }
511 }
512
513 return valid;
514 }
515
516 /*
517 * Clear errors in the provided address range.
518 */
519 static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
520 vme_address_t aspace, unsigned long long address, size_t count)
521 {
522 struct list_head *err_pos, *temp;
523 struct vme_bus_error *vme_err;
524 unsigned long long bound;
525
526 bound = address + count;
527
528 /*
529 * XXX We are currently not looking at the address space when parsing
530 * for errors. This is because parsing the Address Modifier Codes
531 * is going to be quite resource intensive to do properly. We
532 * should be OK just looking at the addresses and this is certainly
533 * much better than what we had before.
534 */
535 err_pos = NULL;
536 /* Iterate through errors */
537 list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
538 vme_err = list_entry(err_pos, struct vme_bus_error, list);
539
540 if ((vme_err->address >= address) &&
541 (vme_err->address < bound)) {
542
543 list_del(err_pos);
544 kfree(vme_err);
545 }
546 }
547 }
548
549 /*
550 * Initialize a slave window with the requested attributes.
551 */
552 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
553 unsigned long long vme_base, unsigned long long size,
554 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
555 {
556 unsigned int i, addr = 0, granularity = 0;
557 unsigned int temp_ctl = 0;
558 unsigned int vme_base_low, vme_base_high;
559 unsigned int vme_bound_low, vme_bound_high;
560 unsigned int pci_offset_low, pci_offset_high;
561 unsigned long long vme_bound, pci_offset;
562 struct vme_bridge *tsi148_bridge;
563 struct tsi148_driver *bridge;
564
565 tsi148_bridge = image->parent;
566 bridge = tsi148_bridge->driver_priv;
567
568 i = image->number;
569
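/* Each address space has a minimum window granularity; base, bound and
 * offset must all be aligned to it (checked below) */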
570 switch (aspace) {
571 case VME_A16:
572 granularity = 0x10;
573 addr |= TSI148_LCSR_ITAT_AS_A16;
574 break;
575 case VME_A24:
576 granularity = 0x1000;
577 addr |= TSI148_LCSR_ITAT_AS_A24;
578 break;
579 case VME_A32:
580 granularity = 0x10000;
581 addr |= TSI148_LCSR_ITAT_AS_A32;
582 break;
583 case VME_A64:
584 granularity = 0x10000;
585 addr |= TSI148_LCSR_ITAT_AS_A64;
586 break;
587 case VME_CRCSR:
588 case VME_USER1:
589 case VME_USER2:
590 case VME_USER3:
591 case VME_USER4:
592 default:
593 dev_err(tsi148_bridge->parent, "Invalid address space\n");
594 return -EINVAL;
595 break;
596 }
597
598 /* Convert 64-bit variables to 2x 32-bit variables */
599 reg_split(vme_base, &vme_base_high, &vme_base_low);
600
601 /*
602 * Bound address is a valid address for the window, adjust
603 * accordingly
604 */
605 vme_bound = vme_base + size - granularity;
606 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
607 pci_offset = (unsigned long long)pci_base - vme_base;
608 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
609
610 if (vme_base_low & (granularity - 1)) {
611 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
612 return -EINVAL;
613 }
614 if (vme_bound_low & (granularity - 1)) {
615 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
616 return -EINVAL;
617 }
618 if (pci_offset_low & (granularity - 1)) {
619 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
620 "alignment\n");
621 return -EINVAL;
622 }
623
624 /* Disable while we are mucking around */
625 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
626 TSI148_LCSR_OFFSET_ITAT);
627 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
628 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
629 TSI148_LCSR_OFFSET_ITAT);
630
631 /* Setup mapping */
632 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
633 TSI148_LCSR_OFFSET_ITSAU);
634 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
635 TSI148_LCSR_OFFSET_ITSAL);
636 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
637 TSI148_LCSR_OFFSET_ITEAU);
638 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
639 TSI148_LCSR_OFFSET_ITEAL);
640 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
641 TSI148_LCSR_OFFSET_ITOFU);
642 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
643 TSI148_LCSR_OFFSET_ITOFL);
644
645 /* Setup 2eSST speeds */
646 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
647 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
648 case VME_2eSST160:
649 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
650 break;
651 case VME_2eSST267:
652 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
653 break;
654 case VME_2eSST320:
655 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
656 break;
657 }
658
659 /* Setup cycle types */
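/* Clear the block transfer protocol bits (BLT, MBLT, 2eVME, 2eSST, 2eSSTB)
 * before setting those requested */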
660 temp_ctl &= ~(0x1F << 7);
661 if (cycle & VME_BLT)
662 temp_ctl |= TSI148_LCSR_ITAT_BLT;
663 if (cycle & VME_MBLT)
664 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
665 if (cycle & VME_2eVME)
666 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
667 if (cycle & VME_2eSST)
668 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
669 if (cycle & VME_2eSSTB)
670 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
671
672 /* Setup address space */
673 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
674 temp_ctl |= addr;
675
676 temp_ctl &= ~0xF;
677 if (cycle & VME_SUPER)
678 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
679 if (cycle & VME_USER)
680 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
681 if (cycle & VME_PROG)
682 temp_ctl |= TSI148_LCSR_ITAT_PGM;
683 if (cycle & VME_DATA)
684 temp_ctl |= TSI148_LCSR_ITAT_DATA;
685
686 /* Write ctl reg without enable */
687 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
688 TSI148_LCSR_OFFSET_ITAT);
689
690 if (enabled)
691 temp_ctl |= TSI148_LCSR_ITAT_EN;
692
693 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
694 TSI148_LCSR_OFFSET_ITAT);
695
696 return 0;
697 }
698
699 /*
700 * Get slave window configuration.
701 */
702 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
703 unsigned long long *vme_base, unsigned long long *size,
704 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
705 {
706 unsigned int i, granularity = 0, ctl = 0;
707 unsigned int vme_base_low, vme_base_high;
708 unsigned int vme_bound_low, vme_bound_high;
709 unsigned int pci_offset_low, pci_offset_high;
710 unsigned long long vme_bound, pci_offset;
711 struct tsi148_driver *bridge;
712
713 bridge = image->parent->driver_priv;
714
715 i = image->number;
716
717 /* Read registers */
718 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
719 TSI148_LCSR_OFFSET_ITAT);
720
721 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
722 TSI148_LCSR_OFFSET_ITSAU);
723 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
724 TSI148_LCSR_OFFSET_ITSAL);
725 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
726 TSI148_LCSR_OFFSET_ITEAU);
727 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
728 TSI148_LCSR_OFFSET_ITEAL);
729 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
730 TSI148_LCSR_OFFSET_ITOFU);
731 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
732 TSI148_LCSR_OFFSET_ITOFL);
733
734 /* Convert 64-bit variables to 2x 32-bit variables */
735 reg_join(vme_base_high, vme_base_low, vme_base);
736 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
737 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
738
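/* The stored translation offset is PCI base minus VME base, so adding it
 * back to the VME base recovers the window's PCI base */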
739 *pci_base = (dma_addr_t)vme_base + pci_offset;
740
741 *enabled = 0;
742 *aspace = 0;
743 *cycle = 0;
744
745 if (ctl & TSI148_LCSR_ITAT_EN)
746 *enabled = 1;
747
748 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
749 granularity = 0x10;
750 *aspace |= VME_A16;
751 }
752 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
753 granularity = 0x1000;
754 *aspace |= VME_A24;
755 }
756 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
757 granularity = 0x10000;
758 *aspace |= VME_A32;
759 }
760 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
761 granularity = 0x10000;
762 *aspace |= VME_A64;
763 }
764
765 /* Need granularity before we set the size */
766 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
767
768
769 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
770 *cycle |= VME_2eSST160;
771 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
772 *cycle |= VME_2eSST267;
773 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
774 *cycle |= VME_2eSST320;
775
776 if (ctl & TSI148_LCSR_ITAT_BLT)
777 *cycle |= VME_BLT;
778 if (ctl & TSI148_LCSR_ITAT_MBLT)
779 *cycle |= VME_MBLT;
780 if (ctl & TSI148_LCSR_ITAT_2eVME)
781 *cycle |= VME_2eVME;
782 if (ctl & TSI148_LCSR_ITAT_2eSST)
783 *cycle |= VME_2eSST;
784 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
785 *cycle |= VME_2eSSTB;
786
787 if (ctl & TSI148_LCSR_ITAT_SUPR)
788 *cycle |= VME_SUPER;
789 if (ctl & TSI148_LCSR_ITAT_NPRIV)
790 *cycle |= VME_USER;
791 if (ctl & TSI148_LCSR_ITAT_PGM)
792 *cycle |= VME_PROG;
793 if (ctl & TSI148_LCSR_ITAT_DATA)
794 *cycle |= VME_DATA;
795
796 return 0;
797 }
798
799 /*
800 * Allocate and map PCI Resource
801 */
802 static int tsi148_alloc_resource(struct vme_master_resource *image,
803 unsigned long long size)
804 {
805 unsigned long long existing_size;
806 int retval = 0;
807 struct pci_dev *pdev;
808 struct vme_bridge *tsi148_bridge;
809
810 tsi148_bridge = image->parent;
811
812 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
813
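/* Resource end addresses are inclusive, so an existing window of the
 * requested size has end - start == size - 1 */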
814 existing_size = (unsigned long long)(image->bus_resource.end -
815 image->bus_resource.start);
816
817 /* If the existing size is OK, return */
818 if ((size != 0) && (existing_size == (size - 1)))
819 return 0;
820
821 if (existing_size != 0) {
822 iounmap(image->kern_base);
823 image->kern_base = NULL;
824 kfree(image->bus_resource.name);
825 release_resource(&image->bus_resource);
826 memset(&image->bus_resource, 0, sizeof(struct resource));
827 }
828
829 /* Exit here if size is zero */
830 if (size == 0)
831 return 0;
832
833 if (image->bus_resource.name == NULL) {
834 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
835 if (image->bus_resource.name == NULL) {
836 dev_err(tsi148_bridge->parent, "Unable to allocate "
837 "memory for resource name\n");
838 retval = -ENOMEM;
839 goto err_name;
840 }
841 }
842
843 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
844 image->number);
845
846 image->bus_resource.start = 0;
847 image->bus_resource.end = (unsigned long)size;
848 image->bus_resource.flags = IORESOURCE_MEM;
849
850 retval = pci_bus_alloc_resource(pdev->bus,
851 &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
852 0, NULL, NULL);
853 if (retval) {
854 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
855 "resource for window %d size 0x%lx start 0x%lx\n",
856 image->number, (unsigned long)size,
857 (unsigned long)image->bus_resource.start);
858 goto err_resource;
859 }
860
861 image->kern_base = ioremap_nocache(
862 image->bus_resource.start, size);
863 if (image->kern_base == NULL) {
864 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
865 retval = -ENOMEM;
866 goto err_remap;
867 }
868
869 return 0;
870
871 err_remap:
872 release_resource(&image->bus_resource);
873 err_resource:
874 kfree(image->bus_resource.name);
875 memset(&image->bus_resource, 0, sizeof(struct resource));
876 err_name:
877 return retval;
878 }
879
880 /*
881 * Free and unmap PCI Resource
882 */
883 static void tsi148_free_resource(struct vme_master_resource *image)
884 {
885 iounmap(image->kern_base);
886 image->kern_base = NULL;
887 release_resource(&image->bus_resource);
888 kfree(image->bus_resource.name);
889 memset(&image->bus_resource, 0, sizeof(struct resource));
890 }
891
892 /*
893 * Set the attributes of an outbound window.
894 */
895 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
896 unsigned long long vme_base, unsigned long long size,
897 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
898 {
899 int retval = 0;
900 unsigned int i;
901 unsigned int temp_ctl = 0;
902 unsigned int pci_base_low, pci_base_high;
903 unsigned int pci_bound_low, pci_bound_high;
904 unsigned int vme_offset_low, vme_offset_high;
905 unsigned long long pci_bound, vme_offset, pci_base;
906 struct vme_bridge *tsi148_bridge;
907 struct tsi148_driver *bridge;
908
909 tsi148_bridge = image->parent;
910
911 bridge = tsi148_bridge->driver_priv;
912
913 /* Verify input data */
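/* Outbound window addresses and offsets must be aligned to the 64KB
 * outbound window granularity */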
914 if (vme_base & 0xFFFF) {
915 dev_err(tsi148_bridge->parent, "Invalid VME Window "
916 "alignment\n");
917 retval = -EINVAL;
918 goto err_window;
919 }
920
921 if ((size == 0) && (enabled != 0)) {
922 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
923 "enabled windows\n");
924 retval = -EINVAL;
925 goto err_window;
926 }
927
928 spin_lock(&image->lock);
929
930 /* Let's allocate the resource here rather than further up the stack as
931 * it avoids pushing loads of bus-dependent stuff up the stack. If size
932 * is zero, any existing resource will be freed.
933 */
934 retval = tsi148_alloc_resource(image, size);
935 if (retval) {
936 spin_unlock(&image->lock);
937 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
938 "resource\n");
939 goto err_res;
940 }
941
942 if (size == 0) {
943 pci_base = 0;
944 pci_bound = 0;
945 vme_offset = 0;
946 } else {
947 pci_base = (unsigned long long)image->bus_resource.start;
948
949 /*
950 * Bound address is a valid address for the window, adjust
951 * according to window granularity.
952 */
953 pci_bound = pci_base + (size - 0x10000);
954 vme_offset = vme_base - pci_base;
955 }
956
957 /* Convert 64-bit variables to 2x 32-bit variables */
958 reg_split(pci_base, &pci_base_high, &pci_base_low);
959 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
960 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
961
962 if (pci_base_low & 0xFFFF) {
963 spin_unlock(&image->lock);
964 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
965 retval = -EINVAL;
966 goto err_gran;
967 }
968 if (pci_bound_low & 0xFFFF) {
969 spin_unlock(&image->lock);
970 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
971 retval = -EINVAL;
972 goto err_gran;
973 }
974 if (vme_offset_low & 0xFFFF) {
975 spin_unlock(&image->lock);
976 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
977 "alignment\n");
978 retval = -EINVAL;
979 goto err_gran;
980 }
981
982 i = image->number;
983
984 /* Disable while we are mucking around */
985 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
986 TSI148_LCSR_OFFSET_OTAT);
987 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
988 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
989 TSI148_LCSR_OFFSET_OTAT);
990
991 /* Setup 2eSST speeds */
992 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
993 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
994 case VME_2eSST160:
995 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
996 break;
997 case VME_2eSST267:
998 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
999 break;
1000 case VME_2eSST320:
1001 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1002 break;
1003 }
1004
1005 /* Setup cycle types */
1006 if (cycle & VME_BLT) {
1007 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1008 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1009 }
1010 if (cycle & VME_MBLT) {
1011 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1012 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1013 }
1014 if (cycle & VME_2eVME) {
1015 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1016 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1017 }
1018 if (cycle & VME_2eSST) {
1019 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1020 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1021 }
1022 if (cycle & VME_2eSSTB) {
1023 dev_warn(tsi148_bridge->parent, "Currently not setting "
1024 "Broadcast Select Registers\n");
1025 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1026 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1027 }
1028
1029 /* Setup data width */
1030 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1031 switch (dwidth) {
1032 case VME_D16:
1033 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1034 break;
1035 case VME_D32:
1036 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1037 break;
1038 default:
1039 spin_unlock(&image->lock);
1040 dev_err(tsi148_bridge->parent, "Invalid data width\n");
1041 retval = -EINVAL;
1042 goto err_dwidth;
1043 }
1044
1045 /* Setup address space */
1046 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1047 switch (aspace) {
1048 case VME_A16:
1049 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1050 break;
1051 case VME_A24:
1052 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1053 break;
1054 case VME_A32:
1055 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1056 break;
1057 case VME_A64:
1058 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1059 break;
1060 case VME_CRCSR:
1061 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1062 break;
1063 case VME_USER1:
1064 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1065 break;
1066 case VME_USER2:
1067 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1068 break;
1069 case VME_USER3:
1070 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1071 break;
1072 case VME_USER4:
1073 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1074 break;
1075 default:
1076 spin_unlock(&image->lock);
1077 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1078 retval = -EINVAL;
1079 goto err_aspace;
1080 break;
1081 }
1082
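/* Clear the supervisor and program access bits before setting those requested */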
1083 temp_ctl &= ~(3<<4);
1084 if (cycle & VME_SUPER)
1085 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1086 if (cycle & VME_PROG)
1087 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1088
1089 /* Setup mapping */
1090 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1091 TSI148_LCSR_OFFSET_OTSAU);
1092 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1093 TSI148_LCSR_OFFSET_OTSAL);
1094 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1095 TSI148_LCSR_OFFSET_OTEAU);
1096 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1097 TSI148_LCSR_OFFSET_OTEAL);
1098 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1099 TSI148_LCSR_OFFSET_OTOFU);
1100 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1101 TSI148_LCSR_OFFSET_OTOFL);
1102
1103 /* Write ctl reg without enable */
1104 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1105 TSI148_LCSR_OFFSET_OTAT);
1106
1107 if (enabled)
1108 temp_ctl |= TSI148_LCSR_OTAT_EN;
1109
1110 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1111 TSI148_LCSR_OFFSET_OTAT);
1112
1113 spin_unlock(&image->lock);
1114 return 0;
1115
1116 err_aspace:
1117 err_dwidth:
1118 err_gran:
1119 tsi148_free_resource(image);
1120 err_res:
1121 err_window:
1122 return retval;
1123
1124 }
1125
1126 /*
1127 * Get the attributes of an outbound window.
1128 *
1129 * XXX Not parsing prefetch information.
1130 */
1131 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1132 unsigned long long *vme_base, unsigned long long *size,
1133 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1134 {
1135 unsigned int i, ctl;
1136 unsigned int pci_base_low, pci_base_high;
1137 unsigned int pci_bound_low, pci_bound_high;
1138 unsigned int vme_offset_low, vme_offset_high;
1139
1140 unsigned long long pci_base, pci_bound, vme_offset;
1141 struct tsi148_driver *bridge;
1142
1143 bridge = image->parent->driver_priv;
1144
1145 i = image->number;
1146
1147 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1148 TSI148_LCSR_OFFSET_OTAT);
1149
1150 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1151 TSI148_LCSR_OFFSET_OTSAU);
1152 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1153 TSI148_LCSR_OFFSET_OTSAL);
1154 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1155 TSI148_LCSR_OFFSET_OTEAU);
1156 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1157 TSI148_LCSR_OFFSET_OTEAL);
1158 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1159 TSI148_LCSR_OFFSET_OTOFU);
1160 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1161 TSI148_LCSR_OFFSET_OTOFL);
1162
1163 /* Convert 64-bit variables to 2x 32-bit variables */
1164 reg_join(pci_base_high, pci_base_low, &pci_base);
1165 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1166 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1167
1168 *vme_base = pci_base + vme_offset;
1169 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1170
1171 *enabled = 0;
1172 *aspace = 0;
1173 *cycle = 0;
1174 *dwidth = 0;
1175
1176 if (ctl & TSI148_LCSR_OTAT_EN)
1177 *enabled = 1;
1178
1179 /* Setup address space */
1180 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1181 *aspace |= VME_A16;
1182 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1183 *aspace |= VME_A24;
1184 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1185 *aspace |= VME_A32;
1186 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1187 *aspace |= VME_A64;
1188 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1189 *aspace |= VME_CRCSR;
1190 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1191 *aspace |= VME_USER1;
1192 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1193 *aspace |= VME_USER2;
1194 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1195 *aspace |= VME_USER3;
1196 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1197 *aspace |= VME_USER4;
1198
1199 /* Setup 2eSST speeds */
1200 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1201 *cycle |= VME_2eSST160;
1202 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1203 *cycle |= VME_2eSST267;
1204 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1205 *cycle |= VME_2eSST320;
1206
1207 /* Setup cycle types */
1208 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1209 *cycle |= VME_SCT;
1210 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1211 *cycle |= VME_BLT;
1212 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1213 *cycle |= VME_MBLT;
1214 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1215 *cycle |= VME_2eVME;
1216 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1217 *cycle |= VME_2eSST;
1218 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1219 *cycle |= VME_2eSSTB;
1220
1221 if (ctl & TSI148_LCSR_OTAT_SUP)
1222 *cycle |= VME_SUPER;
1223 else
1224 *cycle |= VME_USER;
1225
1226 if (ctl & TSI148_LCSR_OTAT_PGM)
1227 *cycle |= VME_PROG;
1228 else
1229 *cycle |= VME_DATA;
1230
1231 /* Setup data width */
1232 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1233 *dwidth = VME_D16;
1234 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1235 *dwidth = VME_D32;
1236
1237 return 0;
1238 }
1239
1240
1241 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1242 unsigned long long *vme_base, unsigned long long *size,
1243 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1244 {
1245 int retval;
1246
1247 spin_lock(&image->lock);
1248
1249 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1250 cycle, dwidth);
1251
1252 spin_unlock(&image->lock);
1253
1254 return retval;
1255 }
1256
1257 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1258 size_t count, loff_t offset)
1259 {
1260 int retval, enabled;
1261 unsigned long long vme_base, size;
1262 vme_address_t aspace;
1263 vme_cycle_t cycle;
1264 vme_width_t dwidth;
1265 struct vme_bus_error *vme_err = NULL;
1266 struct vme_bridge *tsi148_bridge;
1267
1268 tsi148_bridge = image->parent;
1269
1270 spin_lock(&image->lock);
1271
1272 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1273 retval = count;
1274
1275 if (!err_chk)
1276 goto skip_chk;
1277
1278 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1279 &dwidth);
1280
1281 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1282 count);
1283 if (vme_err != NULL) {
1284 dev_err(image->parent->parent, "First VME read error detected "
1285 "at address 0x%llx\n", vme_err->address);
1286 retval = vme_err->address - (vme_base + offset);
1287 /* Clear down saved errors in this address range */
1288 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1289 count);
1290 }
1291
1292 skip_chk:
1293 spin_unlock(&image->lock);
1294
1295 return retval;
1296 }
1297
1298
1299 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1300 size_t count, loff_t offset)
1301 {
1302 int retval = 0, enabled;
1303 unsigned long long vme_base, size;
1304 vme_address_t aspace;
1305 vme_cycle_t cycle;
1306 vme_width_t dwidth;
1307
1308 struct vme_bus_error *vme_err = NULL;
1309 struct vme_bridge *tsi148_bridge;
1310 struct tsi148_driver *bridge;
1311
1312 tsi148_bridge = image->parent;
1313
1314 bridge = tsi148_bridge->driver_priv;
1315
1316 spin_lock(&image->lock);
1317
1318 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1319 retval = count;
1320
1321 /*
1322 * Writes are posted. We need to do a read on the VME bus to flush out
1323 * all of the writes before we check for errors. We can't guarantee
1324 * that reading the data we have just written is safe. It is believed
1325 * that there isn't any read/write re-ordering, so we can read any
1326 * location in VME space, so let's read the Device ID from the tsi148's
1327 * own registers as mapped into CR/CSR space.
1328 *
1329 * We check for saved errors in the written address range/space.
1330 */
1331
1332 if (!err_chk)
1333 goto skip_chk;
1334
1335 /*
1336 * Get window info first, to maximise the time that the buffers may
1337 * flush on their own.
1338 */
1339 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1340 &dwidth);
1341
1342 ioread16(bridge->flush_image->kern_base + 0x7F000);
1343
1344 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1345 count);
1346 if (vme_err != NULL) {
1347 dev_warn(tsi148_bridge->parent, "First VME write error detected"
1348 " at address 0x%llx\n", vme_err->address);
1349 retval = vme_err->address - (vme_base + offset);
1350 /* Clear down saved errors in this address range */
1351 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1352 count);
1353 }
1354
1355 skip_chk:
1356 spin_unlock(&image->lock);
1357
1358 return retval;
1359 }
1360
1361 /*
1362 * Perform an RMW cycle on the VME bus.
1363 *
1364 * Requires a previously configured master window, returns final value.
1365 */
1366 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1367 unsigned int mask, unsigned int compare, unsigned int swap,
1368 loff_t offset)
1369 {
1370 unsigned long long pci_addr;
1371 unsigned int pci_addr_high, pci_addr_low;
1372 u32 tmp, result;
1373 int i;
1374 struct tsi148_driver *bridge;
1375
1376 bridge = image->parent->driver_priv;
1377
1378 /* Find the PCI address that maps to the desired VME address */
1379 i = image->number;
1380
1381 /* Locking as we can only do one of these at a time */
1382 mutex_lock(&bridge->vme_rmw);
1383
1384 /* Lock image */
1385 spin_lock(&image->lock);
1386
1387 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1388 TSI148_LCSR_OFFSET_OTSAU);
1389 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1390 TSI148_LCSR_OFFSET_OTSAL);
1391
1392 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1393 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1394
1395 /* Configure registers */
1396 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1397 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1398 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1399 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1400 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1401
1402 /* Enable RMW */
1403 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1404 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1405 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1406
1407 /* Kick process off with a read to the required address. */
1408 result = ioread32be(image->kern_base + offset);
1409
1410 /* Disable RMW */
1411 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1412 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1413 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1414
1415 spin_unlock(&image->lock);
1416
1417 mutex_unlock(&bridge->vme_rmw);
1418
1419 return result;
1420 }
1421
1422 static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
1423 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
1424 {
1425 /* Setup 2eSST speeds */
1426 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1427 case VME_2eSST160:
1428 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1429 break;
1430 case VME_2eSST267:
1431 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1432 break;
1433 case VME_2eSST320:
1434 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1435 break;
1436 }
1437
1438 /* Setup cycle types */
1439 if (cycle & VME_SCT)
1440 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1441
1442 if (cycle & VME_BLT)
1443 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1444
1445 if (cycle & VME_MBLT)
1446 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1447
1448 if (cycle & VME_2eVME)
1449 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1450
1451 if (cycle & VME_2eSST)
1452 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1453
1454 if (cycle & VME_2eSSTB) {
1455 dev_err(dev, "Currently not setting Broadcast Select "
1456 "Registers\n");
1457 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1458 }
1459
1460 /* Setup data width */
1461 switch (dwidth) {
1462 case VME_D16:
1463 *attr |= TSI148_LCSR_DSAT_DBW_16;
1464 break;
1465 case VME_D32:
1466 *attr |= TSI148_LCSR_DSAT_DBW_32;
1467 break;
1468 default:
1469 dev_err(dev, "Invalid data width\n");
1470 return -EINVAL;
1471 }
1472
1473 /* Setup address space */
1474 switch (aspace) {
1475 case VME_A16:
1476 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1477 break;
1478 case VME_A24:
1479 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1480 break;
1481 case VME_A32:
1482 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1483 break;
1484 case VME_A64:
1485 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1486 break;
1487 case VME_CRCSR:
1488 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1489 break;
1490 case VME_USER1:
1491 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1492 break;
1493 case VME_USER2:
1494 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1495 break;
1496 case VME_USER3:
1497 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1498 break;
1499 case VME_USER4:
1500 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1501 break;
1502 default:
1503 dev_err(dev, "Invalid address space\n");
1504 return -EINVAL;
1505 break;
1506 }
1507
1508 if (cycle & VME_SUPER)
1509 *attr |= TSI148_LCSR_DSAT_SUP;
1510 if (cycle & VME_PROG)
1511 *attr |= TSI148_LCSR_DSAT_PGM;
1512
1513 return 0;
1514 }
1515
1516 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
1517 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
1518 {
1519 /* Setup 2eSST speeds */
1520 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1521 case VME_2eSST160:
1522 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1523 break;
1524 case VME_2eSST267:
1525 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1526 break;
1527 case VME_2eSST320:
1528 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1529 break;
1530 }
1531
1532 /* Setup cycle types */
1533 if (cycle & VME_SCT)
1534 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1535
1536 if (cycle & VME_BLT)
1537 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1538
1539 if (cycle & VME_MBLT)
1540 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1541
1542 if (cycle & VME_2eVME)
1543 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1544
1545 if (cycle & VME_2eSST)
1546 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1547
1548 if (cycle & VME_2eSSTB) {
1549 dev_err(dev, "Currently not setting Broadcast Select "
1550 "Registers\n");
1551 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1552 }
1553
1554 /* Setup data width */
1555 switch (dwidth) {
1556 case VME_D16:
1557 *attr |= TSI148_LCSR_DDAT_DBW_16;
1558 break;
1559 case VME_D32:
1560 *attr |= TSI148_LCSR_DDAT_DBW_32;
1561 break;
1562 default:
1563 dev_err(dev, "Invalid data width\n");
1564 return -EINVAL;
1565 }
1566
1567 /* Setup address space */
1568 switch (aspace) {
1569 case VME_A16:
1570 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1571 break;
1572 case VME_A24:
1573 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1574 break;
1575 case VME_A32:
1576 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1577 break;
1578 case VME_A64:
1579 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1580 break;
1581 case VME_CRCSR:
1582 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1583 break;
1584 case VME_USER1:
1585 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1586 break;
1587 case VME_USER2:
1588 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1589 break;
1590 case VME_USER3:
1591 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1592 break;
1593 case VME_USER4:
1594 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1595 break;
1596 default:
1597 dev_err(dev, "Invalid address space\n");
1598 return -EINVAL;
1599 break;
1600 }
1601
1602 if (cycle & VME_SUPER)
1603 *attr |= TSI148_LCSR_DDAT_SUP;
1604 if (cycle & VME_PROG)
1605 *attr |= TSI148_LCSR_DDAT_PGM;
1606
1607 return 0;
1608 }
1609
1610 /*
1611 * Add a link list descriptor to the list
1612 */
1613 static int tsi148_dma_list_add(struct vme_dma_list *list,
1614 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1615 {
1616 struct tsi148_dma_entry *entry, *prev;
1617 u32 address_high, address_low;
1618 struct vme_dma_pattern *pattern_attr;
1619 struct vme_dma_pci *pci_attr;
1620 struct vme_dma_vme *vme_attr;
1621 dma_addr_t desc_ptr;
1622 int retval = 0;
1623 struct vme_bridge *tsi148_bridge;
1624
1625 tsi148_bridge = list->parent->parent;
1626
1627 /* Descriptor must be aligned on 64-bit boundaries */
1628 entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1629 if (entry == NULL) {
1630 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1631 "dma resource structure\n");
1632 retval = -ENOMEM;
1633 goto err_mem;
1634 }
1635
1636 /* Test descriptor alignment */
1637 if ((unsigned long)&entry->descriptor & 0x7) {
1638 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1639 "byte boundary as required: %p\n",
1640 &entry->descriptor);
1641 retval = -EINVAL;
1642 goto err_align;
1643 }
1644
1645 /* Given we are going to fill out the structure, we probably don't
1646 * need to zero it, but better safe than sorry for now.
1647 */
1648 memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1649
1650 /* Fill out source part */
1651 switch (src->type) {
1652 case VME_DMA_PATTERN:
1653 pattern_attr = src->private;
1654
1655 entry->descriptor.dsal = pattern_attr->pattern;
1656 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1657 /* Default behaviour is 32 bit pattern */
1658 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1659 entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1660
1661 /* It seems that the default behaviour is to increment */
1662 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1663 entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
1664
1665 break;
1666 case VME_DMA_PCI:
1667 pci_attr = src->private;
1668
1669 reg_split((unsigned long long)pci_attr->address, &address_high,
1670 &address_low);
1671 entry->descriptor.dsau = address_high;
1672 entry->descriptor.dsal = address_low;
1673 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
1674 break;
1675 case VME_DMA_VME:
1676 vme_attr = src->private;
1677
1678 reg_split((unsigned long long)vme_attr->address, &address_high,
1679 &address_low);
1680 entry->descriptor.dsau = address_high;
1681 entry->descriptor.dsal = address_low;
1682 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1683
1684 retval = tsi148_dma_set_vme_src_attributes(
1685 tsi148_bridge->parent, &entry->descriptor.dsat,
1686 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1687 if (retval < 0)
1688 goto err_source;
1689 break;
1690 default:
1691 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1692 retval = -EINVAL;
1693 goto err_source;
1694 break;
1695 }
1696
1697 /* Assume last link - this will be over-written by adding another */
1698 entry->descriptor.dnlau = 0;
1699 entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1700
1701
1702 /* Fill out destination part */
1703 switch (dest->type) {
1704 case VME_DMA_PCI:
1705 pci_attr = dest->private;
1706
1707 reg_split((unsigned long long)pci_attr->address, &address_high,
1708 &address_low);
1709 entry->descriptor.ddau = address_high;
1710 entry->descriptor.ddal = address_low;
1711 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1712 break;
1713 case VME_DMA_VME:
1714 vme_attr = dest->private;
1715
1716 reg_split((unsigned long long)vme_attr->address, &address_high,
1717 &address_low);
1718 entry->descriptor.ddau = address_high;
1719 entry->descriptor.ddal = address_low;
1720 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1721
1722 retval = tsi148_dma_set_vme_dest_attributes(
1723 tsi148_bridge->parent, &entry->descriptor.ddat,
1724 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1725 if (retval < 0)
1726 goto err_dest;
1727 break;
1728 default:
1729 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1730 retval = -EINVAL;
1731 goto err_dest;
1732 break;
1733 }
1734
1735 /* Fill out count */
1736 entry->descriptor.dcnt = (u32)count;
1737
1738 /* Add to list */
1739 list_add_tail(&entry->list, &list->entries);
1740
1741 /* Fill out previous descriptors "Next Address" */
1742 if (entry->list.prev != &list->entries) {
1743 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1744 list);
1745 /* We need the bus address for the pointer */
1746 desc_ptr = virt_to_bus(&entry->descriptor);
1747 reg_split(desc_ptr, &prev->descriptor.dnlau,
1748 &prev->descriptor.dnlal);
1749 }
1750
1751 return 0;
1752
1753 err_dest:
1754 err_source:
1755 err_align:
1756 kfree(entry);
1757 err_mem:
1758 return retval;
1759 }
1760
1761 /*
1762 * Check whether the provided DMA channel is busy (returns 0 if busy, 1 if idle).
1763 */
1764 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1765 {
1766 u32 tmp;
1767 struct tsi148_driver *bridge;
1768
1769 bridge = tsi148_bridge->driver_priv;
1770
1771 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1772 TSI148_LCSR_OFFSET_DSTA);
1773
1774 if (tmp & TSI148_LCSR_DSTA_BSY)
1775 return 0;
1776 else
1777 return 1;
1778
1779 }
1780
1781 /*
1782 * Execute a previously generated link list
1783 *
1784 * XXX Need to provide control register configuration.
1785 */
1786 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1787 {
1788 struct vme_dma_resource *ctrlr;
1789 int channel, retval = 0;
1790 struct tsi148_dma_entry *entry;
1791 dma_addr_t bus_addr;
1792 u32 bus_addr_high, bus_addr_low;
1793 u32 val, dctlreg = 0;
1794 struct vme_bridge *tsi148_bridge;
1795 struct tsi148_driver *bridge;
1796
1797 ctrlr = list->parent;
1798
1799 tsi148_bridge = ctrlr->parent;
1800
1801 bridge = tsi148_bridge->driver_priv;
1802
1803 mutex_lock(&ctrlr->mtx);
1804
1805 channel = ctrlr->number;
1806
1807 if (!list_empty(&ctrlr->running)) {
1808 /*
1809 * XXX We have an active DMA transfer and currently haven't
1810 * sorted out the mechanism for "pending" DMA transfers.
1811 * Return busy.
1812 */
1813 /* Need to add to pending here */
1814 mutex_unlock(&ctrlr->mtx);
1815 return -EBUSY;
1816 } else {
1817 list_add(&list->list, &ctrlr->running);
1818 }
1819
1820 /* Get first bus address and write into registers */
1821 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1822 list);
1823
1824 bus_addr = virt_to_bus(&entry->descriptor);
1825
1826 mutex_unlock(&ctrlr->mtx);
1827
1828 reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1829
1830 iowrite32be(bus_addr_high, bridge->base +
1831 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1832 iowrite32be(bus_addr_low, bridge->base +
1833 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1834
1835 /* Start the operation */
1836 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1837 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1838
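/* Sleep until tsi148_DMA_irqhandler() wakes dma_queue[channel] and the
 * channel reports idle */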
1839 wait_event_interruptible(bridge->dma_queue[channel],
1840 tsi148_dma_busy(ctrlr->parent, channel));
1841 /*
1842 * Read status register, this register is valid until we kick off a
1843 * new transfer.
1844 */
1845 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1846 TSI148_LCSR_OFFSET_DSTA);
1847
1848 if (val & TSI148_LCSR_DSTA_VBE) {
1849 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1850 retval = -EIO;
1851 }
1852
1853 /* Remove list from running list */
1854 mutex_lock(&ctrlr->mtx);
1855 list_del(&list->list);
1856 mutex_unlock(&ctrlr->mtx);
1857
1858 return retval;
1859 }
1860
1861 /*
1862 * Clean up a previously generated link list
1863 *
1864 * This is kept as a separate function; don't assume that the chain can't be reused.
1865 */
1866 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1867 {
1868 struct list_head *pos, *temp;
1869 struct tsi148_dma_entry *entry;
1870
1871 /* detach and free each entry */
1872 list_for_each_safe(pos, temp, &list->entries) {
1873 list_del(pos);
1874 entry = list_entry(pos, struct tsi148_dma_entry, list);
1875 kfree(entry);
1876 }
1877
1878 return 0;
1879 }
1880
1881 /*
1882 * All 4 location monitors reside at the same base - this is therefore a
1883 * system-wide configuration.
1884 *
1885 * This does not enable the LM monitor - that should be done when the first
1886 * callback is attached and disabled when the last callback is removed.
1887 */
1888 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1889 vme_address_t aspace, vme_cycle_t cycle)
1890 {
1891 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1892 int i;
1893 struct vme_bridge *tsi148_bridge;
1894 struct tsi148_driver *bridge;
1895
1896 tsi148_bridge = lm->parent;
1897
1898 bridge = tsi148_bridge->driver_priv;
1899
1900 mutex_lock(&lm->mtx);
1901
1902 /* If we already have a callback attached, we can't move it! */
1903 for (i = 0; i < lm->monitors; i++) {
1904 if (bridge->lm_callback[i] != NULL) {
1905 mutex_unlock(&lm->mtx);
1906 dev_err(tsi148_bridge->parent, "Location monitor "
1907 "callback attached, can't reset\n");
1908 return -EBUSY;
1909 }
1910 }
1911
1912 switch (aspace) {
1913 case VME_A16:
1914 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1915 break;
1916 case VME_A24:
1917 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1918 break;
1919 case VME_A32:
1920 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1921 break;
1922 case VME_A64:
1923 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1924 break;
1925 default:
1926 mutex_unlock(&lm->mtx);
1927 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1928 return -EINVAL;
1930 }
1931
1932 if (cycle & VME_SUPER)
1933 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1934 if (cycle & VME_USER)
1935 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1936 if (cycle & VME_PROG)
1937 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1938 if (cycle & VME_DATA)
1939 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1940
1941 reg_split(lm_base, &lm_base_high, &lm_base_low);
1942
1943 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1944 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1945 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1946
1947 mutex_unlock(&lm->mtx);
1948
1949 return 0;
1950 }
1951
1952 /* Get configuration of the location monitor and return whether it is enabled
1953 * or disabled.
1954 */
1955 static int tsi148_lm_get(struct vme_lm_resource *lm,
1956 unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
1957 {
1958 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1959 struct tsi148_driver *bridge;
1960
1961 bridge = lm->parent->driver_priv;
1962
1963 mutex_lock(&lm->mtx);
1964
1965 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1966 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1967 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1968
1969 reg_join(lm_base_high, lm_base_low, lm_base);
1970
1971 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1972 enabled = 1;
1973
1974 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
1975 *aspace |= VME_A16;
1976
1977 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
1978 *aspace |= VME_A24;
1979
1980 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
1981 *aspace |= VME_A32;
1982
1983 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
1984 *aspace |= VME_A64;
1985 
1987 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
1988 *cycle |= VME_SUPER;
1989 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
1990 *cycle |= VME_USER;
1991 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
1992 *cycle |= VME_PROG;
1993 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
1994 *cycle |= VME_DATA;
1995
1996 mutex_unlock(&lm->mtx);
1997
1998 return enabled;
1999 }
2000
2001 /*
2002 * Attach a callback to a specific location monitor.
2003 *
2004 * Callback will be passed the monitor triggered.
2005 */
2006 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2007 void (*callback)(int))
2008 {
2009 u32 lm_ctl, tmp;
2010 struct vme_bridge *tsi148_bridge;
2011 struct tsi148_driver *bridge;
2012
2013 tsi148_bridge = lm->parent;
2014
2015 bridge = tsi148_bridge->driver_priv;
2016
2017 mutex_lock(&lm->mtx);
2018
2019 /* Ensure that the location monitor is configured - need PGM or DATA */
2020 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2021 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2022 mutex_unlock(&lm->mtx);
2023 dev_err(tsi148_bridge->parent, "Location monitor not properly "
2024 "configured\n");
2025 return -EINVAL;
2026 }
2027
2028 /* Check that a callback isn't already attached */
2029 if (bridge->lm_callback[monitor] != NULL) {
2030 mutex_unlock(&lm->mtx);
2031 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2032 return -EBUSY;
2033 }
2034
2035 /* Attach callback */
2036 bridge->lm_callback[monitor] = callback;
2037
2038 /* Enable Location Monitor interrupt */
2039 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2040 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2041 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2042
2043 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2044 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2045 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2046
2047 /* Ensure that the global Location Monitor Enable bit is set */
2048 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2049 lm_ctl |= TSI148_LCSR_LMAT_EN;
2050 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2051 }
2052
2053 mutex_unlock(&lm->mtx);
2054
2055 return 0;
2056 }
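/*
 * Illustrative only (not part of this driver): a minimal sketch of how a
 * consumer might reach the lm_set/lm_attach methods above through the staging
 * VME API. The resource handle, base address and callback are hypothetical.
 *
 *	static void my_lm_handler(int monitor)
 *	{
 *		pr_info("location monitor %d triggered\n", monitor);
 *	}
 *
 *	vme_lm_set(lm_resource, 0x20000000, VME_A32, VME_USER | VME_DATA);
 *	vme_lm_attach(lm_resource, 0, my_lm_handler);
 */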
2057
2058 /*
2059 * Detach a callback function from a specific location monitor.
2060 */
2061 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2062 {
2063 u32 lm_en, tmp;
2064 struct tsi148_driver *bridge;
2065
2066 bridge = lm->parent->driver_priv;
2067
2068 mutex_lock(&lm->mtx);
2069
2070 /* Disable Location Monitor and ensure previous interrupts are clear */
2071 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2072 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2073 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2074
2075 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2076 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2077 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2078
2079 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2080 bridge->base + TSI148_LCSR_INTC);
2081
2082 /* Detach callback */
2083 bridge->lm_callback[monitor] = NULL;
2084
2085 /* If all location monitors are disabled, disable the global LM enable */
2086 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2087 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2088 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2089 tmp &= ~TSI148_LCSR_LMAT_EN;
2090 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2091 }
2092
2093 mutex_unlock(&lm->mtx);
2094
2095 return 0;
2096 }
2097
2098 /*
2099 * Determine Geographical Addressing
2100 */
2101 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2102 {
2103 u32 slot = 0;
2104 struct tsi148_driver *bridge;
2105
2106 bridge = tsi148_bridge->driver_priv;
2107
2108 if (!geoid) {
2109 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2110 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2111 } else
2112 slot = geoid;
2113
2114 return (int)slot;
2115 }
2116
2117 static int __init tsi148_init(void)
2118 {
2119 return pci_register_driver(&tsi148_driver);
2120 }
2121
2122 /*
2123 * Configure CR/CSR space
2124 *
2125 * Access to the CR/CSR can be configured at power-up. The location of the
2126 * CR/CSR registers in the CR/CSR address space is determined by the board's
2127 * Auto-ID or Geographic address. This function ensures that the window is
2128 * enabled at an offset consistent with the board's geographic address.
2129 *
2130 * Each board has a 512kB window, with the highest 4kB being used for the
2131 * board's registers; this means there is a fixed-length 508kB window which
2132 * must be mapped onto PCI memory.
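*
* For example, a board with geographic address 3 responds to CR/CSR accesses
* in the range 0x180000 - 0x1FFFFF (3 * 512kB), with its register set in the
* top 4kB of that block.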
2133 */
2134 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2135 struct pci_dev *pdev)
2136 {
2137 u32 cbar, crat, vstat;
2138 u32 crcsr_bus_high, crcsr_bus_low;
2139 int retval;
2140 struct tsi148_driver *bridge;
2141
2142 bridge = tsi148_bridge->driver_priv;
2143
2144 /* Allocate mem for CR/CSR image */
2145 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2146 &bridge->crcsr_bus);
2147 if (bridge->crcsr_kernel == NULL) {
2148 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2149 "CR/CSR image\n");
2150 return -ENOMEM;
2151 }
2152
2153 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2154
2155 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2156
2157 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2158 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2159
2160 /* Ensure that the CR/CSR is configured at the correct offset */
2161 cbar = ioread32be(bridge->base + TSI148_CBAR);
2162 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2163
2164 vstat = tsi148_slot_get(tsi148_bridge);
2165
2166 if (cbar != vstat) {
2167 cbar = vstat;
2168 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2169 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2170 }
2171 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2172
2173 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2174 if (crat & TSI148_LCSR_CRAT_EN) {
2175 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2176 } else {
2177 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2178 iowrite32be(crat | TSI148_LCSR_CRAT_EN, bridge->base + TSI148_LCSR_CRAT);
2179 }
2180
2181 /* If we want flushed, error-checked writes, set up a window
2182 * over the CR/CSR registers. We read from here to safely flush
2183 * through VME writes.
2184 */
2185 if (err_chk) {
2186 retval = tsi148_master_set(bridge->flush_image, 1,
2187 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2188 VME_D16);
2189 if (retval)
2190 dev_err(tsi148_bridge->parent, "Configuring flush image"
2191 " failed\n");
2192 }
2193
2194 return 0;
2196 }
2197
2198 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2199 struct pci_dev *pdev)
2200 {
2201 u32 crat;
2202 struct tsi148_driver *bridge;
2203
2204 bridge = tsi148_bridge->driver_priv;
2205
2206 /* Turn off CR/CSR space */
2207 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2208 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2209 bridge->base + TSI148_LCSR_CRAT);
2210
2211 /* Free image */
2212 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2213 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2214
2215 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2216 bridge->crcsr_bus);
2217 }
2218
2219 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2220 {
2221 int retval, i, master_num;
2222 u32 data;
2223 struct list_head *pos = NULL, *n;
2224 struct vme_bridge *tsi148_bridge;
2225 struct tsi148_driver *tsi148_device;
2226 struct vme_master_resource *master_image;
2227 struct vme_slave_resource *slave_image;
2228 struct vme_dma_resource *dma_ctrlr;
2229 struct vme_lm_resource *lm;
2230
2231 /* If we want to support more than one of each bridge, we need to
2232 * dynamically generate this so we get one per device
2233 */
2234 tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2235 if (tsi148_bridge == NULL) {
2236 dev_err(&pdev->dev, "Failed to allocate memory for device "
2237 "structure\n");
2238 retval = -ENOMEM;
2239 goto err_struct;
2240 }
2241
2242 tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2243 if (tsi148_device == NULL) {
2244 dev_err(&pdev->dev, "Failed to allocate memory for device "
2245 "structure\n");
2246 retval = -ENOMEM;
2247 goto err_driver;
2248 }
2249
2250 tsi148_bridge->driver_priv = tsi148_device;
2251
2252 /* Enable the device */
2253 retval = pci_enable_device(pdev);
2254 if (retval) {
2255 dev_err(&pdev->dev, "Unable to enable device\n");
2256 goto err_enable;
2257 }
2258
2259 /* Map Registers */
2260 retval = pci_request_regions(pdev, driver_name);
2261 if (retval) {
2262 dev_err(&pdev->dev, "Unable to reserve resources\n");
2263 goto err_resource;
2264 }
2265
2266 /* map registers in BAR 0 */
2267 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2268 4096);
2269 if (!tsi148_device->base) {
2270 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2271 retval = -EIO;
2272 goto err_remap;
2273 }
2274
2275 /* Check to see if the mapping worked out */
2276 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2277 if (data != PCI_VENDOR_ID_TUNDRA) {
2278 dev_err(&pdev->dev, "CRG region check failed\n");
2279 retval = -EIO;
2280 goto err_test;
2281 }
2282
2283 /* Initialize wait queues & mutual exclusion flags */
2284 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2285 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2286 init_waitqueue_head(&tsi148_device->iack_queue);
2287 mutex_init(&tsi148_device->vme_int);
2288 mutex_init(&tsi148_device->vme_rmw);
2289
2290 tsi148_bridge->parent = &pdev->dev;
2291 strcpy(tsi148_bridge->name, driver_name);
2292
2293 /* Setup IRQ */
2294 retval = tsi148_irq_init(tsi148_bridge);
2295 if (retval != 0) {
2296 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2297 goto err_irq;
2298 }
2299
2300 /* If we are going to flush writes, we need to read from the VME bus.
2301 * We need to do this safely, thus we read the device's own CR/CSR
2302 * register. To do this we must set up a window in CR/CSR space and
2303 * hence have one less master window resource available.
2304 */
2305 master_num = TSI148_MAX_MASTER;
2306 if (err_chk) {
2307 master_num--;
2308
2309 tsi148_device->flush_image =
2310 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2311 if (tsi148_device->flush_image == NULL) {
2312 dev_err(&pdev->dev, "Failed to allocate memory for "
2313 "flush resource structure\n");
2314 retval = -ENOMEM;
2315 goto err_master;
2316 }
2317 tsi148_device->flush_image->parent = tsi148_bridge;
2318 spin_lock_init(&tsi148_device->flush_image->lock);
2319 tsi148_device->flush_image->locked = 1;
2320 tsi148_device->flush_image->number = master_num;
2321 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2322 VME_A32 | VME_A64;
2323 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2324 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2325 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2326 VME_USER | VME_PROG | VME_DATA;
2327 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2328 memset(&tsi148_device->flush_image->bus_resource, 0,
2329 sizeof(struct resource));
2330 tsi148_device->flush_image->kern_base = NULL;
2331 }
2332
2333 /* Add master windows to list */
2334 INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2335 for (i = 0; i < master_num; i++) {
2336 master_image = kmalloc(sizeof(struct vme_master_resource),
2337 GFP_KERNEL);
2338 if (master_image == NULL) {
2339 dev_err(&pdev->dev, "Failed to allocate memory for "
2340 "master resource structure\n");
2341 retval = -ENOMEM;
2342 goto err_master;
2343 }
2344 master_image->parent = tsi148_bridge;
2345 spin_lock_init(&master_image->lock);
2346 master_image->locked = 0;
2347 master_image->number = i;
2348 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2349 VME_A64;
2350 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2351 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2352 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2353 VME_PROG | VME_DATA;
2354 master_image->width_attr = VME_D16 | VME_D32;
2355 memset(&master_image->bus_resource, 0,
2356 sizeof(struct resource));
2357 master_image->kern_base = NULL;
2358 list_add_tail(&master_image->list,
2359 &tsi148_bridge->master_resources);
2360 }
2361
2362 /* Add slave windows to list */
2363 INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2364 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2365 slave_image = kmalloc(sizeof(struct vme_slave_resource),
2366 GFP_KERNEL);
2367 if (slave_image == NULL) {
2368 dev_err(&pdev->dev, "Failed to allocate memory for "
2369 "slave resource structure\n");
2370 retval = -ENOMEM;
2371 goto err_slave;
2372 }
2373 slave_image->parent = tsi148_bridge;
2374 mutex_init(&slave_image->mtx);
2375 slave_image->locked = 0;
2376 slave_image->number = i;
2377 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2378 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2379 VME_USER3 | VME_USER4;
2380 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2381 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2382 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2383 VME_PROG | VME_DATA;
2384 list_add_tail(&slave_image->list,
2385 &tsi148_bridge->slave_resources);
2386 }
2387
2388 /* Add dma engines to list */
2389 INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2390 for (i = 0; i < TSI148_MAX_DMA; i++) {
2391 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2392 GFP_KERNEL);
2393 if (dma_ctrlr == NULL) {
2394 dev_err(&pdev->dev, "Failed to allocate memory for "
2395 "dma resource structure\n");
2396 retval = -ENOMEM;
2397 goto err_dma;
2398 }
2399 dma_ctrlr->parent = tsi148_bridge;
2400 mutex_init(&dma_ctrlr->mtx);
2401 dma_ctrlr->locked = 0;
2402 dma_ctrlr->number = i;
2403 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2404 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2405 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2406 VME_DMA_PATTERN_TO_MEM;
2407 INIT_LIST_HEAD(&dma_ctrlr->pending);
2408 INIT_LIST_HEAD(&dma_ctrlr->running);
2409 list_add_tail(&dma_ctrlr->list,
2410 &tsi148_bridge->dma_resources);
2411 }
2412
2413 /* Add location monitor to list */
2414 INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2415 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2416 if (lm == NULL) {
2417 dev_err(&pdev->dev, "Failed to allocate memory for "
2418 "location monitor resource structure\n");
2419 retval = -ENOMEM;
2420 goto err_lm;
2421 }
2422 lm->parent = tsi148_bridge;
2423 mutex_init(&lm->mtx);
2424 lm->locked = 0;
2425 lm->number = 1;
2426 lm->monitors = 4;
2427 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2428
2429 tsi148_bridge->slave_get = tsi148_slave_get;
2430 tsi148_bridge->slave_set = tsi148_slave_set;
2431 tsi148_bridge->master_get = tsi148_master_get;
2432 tsi148_bridge->master_set = tsi148_master_set;
2433 tsi148_bridge->master_read = tsi148_master_read;
2434 tsi148_bridge->master_write = tsi148_master_write;
2435 tsi148_bridge->master_rmw = tsi148_master_rmw;
2436 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2437 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2438 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2439 tsi148_bridge->irq_set = tsi148_irq_set;
2440 tsi148_bridge->irq_generate = tsi148_irq_generate;
2441 tsi148_bridge->lm_set = tsi148_lm_set;
2442 tsi148_bridge->lm_get = tsi148_lm_get;
2443 tsi148_bridge->lm_attach = tsi148_lm_attach;
2444 tsi148_bridge->lm_detach = tsi148_lm_detach;
2445 tsi148_bridge->slot_get = tsi148_slot_get;
2446
2447 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2448 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2449 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2450 if (!geoid)
2451 dev_info(&pdev->dev, "VME geographical address is %d\n",
2452 data & TSI148_LCSR_VSTAT_GA_M);
2453 else
2454 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2455 geoid);
2456
2457 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2458 err_chk ? "enabled" : "disabled");
2459
2460 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2461 if (retval) {
2462 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2463 goto err_crcsr;
2464 }
2464
2465 retval = vme_register_bridge(tsi148_bridge);
2466 if (retval != 0) {
2467 dev_err(&pdev->dev, "Chip Registration failed.\n");
2468 goto err_reg;
2469 }
2470
2471 pci_set_drvdata(pdev, tsi148_bridge);
2472
2473 /* Clear VME bus "board fail", and "power-up reset" lines */
2474 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2475 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2476 data |= TSI148_LCSR_VSTAT_CPURST;
2477 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2478
2479 return 0;
2480
2481 err_reg:
2482 tsi148_crcsr_exit(tsi148_bridge, pdev);
2483 err_crcsr:
2484 err_lm:
2485 /* resources are stored in a linked list */
2486 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2487 lm = list_entry(pos, struct vme_lm_resource, list);
2488 list_del(pos);
2489 kfree(lm);
2490 }
2491 err_dma:
2492 /* resources are stored in a linked list */
2493 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2494 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2495 list_del(pos);
2496 kfree(dma_ctrlr);
2497 }
2498 err_slave:
2499 /* resources are stored in a linked list */
2500 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2501 slave_image = list_entry(pos, struct vme_slave_resource, list);
2502 list_del(pos);
2503 kfree(slave_image);
2504 }
2505 err_master:
2506 /* resources are stored in a linked list */
2507 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2508 master_image = list_entry(pos, struct vme_master_resource,
2509 list);
2510 list_del(pos);
2511 kfree(master_image);
2512 }
2513
2514 tsi148_irq_exit(tsi148_bridge, pdev);
2515 err_irq:
2516 err_test:
2517 iounmap(tsi148_device->base);
2518 err_remap:
2519 pci_release_regions(pdev);
2520 err_resource:
2521 pci_disable_device(pdev);
2522 err_enable:
2523 kfree(tsi148_device);
2524 err_driver:
2525 kfree(tsi148_bridge);
2526 err_struct:
2527 return retval;
2528
2529 }
2530
2531 static void tsi148_remove(struct pci_dev *pdev)
2532 {
2533 struct list_head *pos = NULL;
2534 struct list_head *tmplist;
2535 struct vme_master_resource *master_image;
2536 struct vme_slave_resource *slave_image;
2537 struct vme_dma_resource *dma_ctrlr;
2538 int i;
2539 struct tsi148_driver *bridge;
2540 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2541
2542 bridge = tsi148_bridge->driver_priv;
2543
2545 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2546
2547 /*
2548 * Shutdown all inbound and outbound windows.
2549 */
2550 for (i = 0; i < 8; i++) {
2551 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2552 TSI148_LCSR_OFFSET_ITAT);
2553 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2554 TSI148_LCSR_OFFSET_OTAT);
2555 }
2556
2557 /*
2558 * Shutdown Location monitor.
2559 */
2560 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2561
2562 /*
2563 * Shutdown CRG map.
2564 */
2565 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2566
2567 /*
2568 * Clear error status.
2569 */
2570 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2571 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2572 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2573
2574 /*
2575 * Remove VIRQ interrupt (if any)
2576 */
2577 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2578 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2579
2580 /*
2581 * Map all Interrupts to PCI INTA
2582 */
2583 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2584 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2585
2586 tsi148_irq_exit(tsi148_bridge, pdev);
2587
2588 vme_unregister_bridge(tsi148_bridge);
2589
2590 tsi148_crcsr_exit(tsi148_bridge, pdev);
2591
2592 /* resources are stored in a linked list */
2593 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2594 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2595 list_del(pos);
2596 kfree(dma_ctrlr);
2597 }
2598
2599 /* resources are stored in a linked list */
2600 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2601 slave_image = list_entry(pos, struct vme_slave_resource, list);
2602 list_del(pos);
2603 kfree(slave_image);
2604 }
2605
2606 /* resources are stored in a linked list */
2607 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2608 master_image = list_entry(pos, struct vme_master_resource,
2609 list);
2610 list_del(pos);
2611 kfree(master_image);
2612 }
2613
2614 iounmap(bridge->base);
2615
2616 pci_release_regions(pdev);
2617
2618 pci_disable_device(pdev);
2619
2620 kfree(tsi148_bridge->driver_priv);
2621
2622 kfree(tsi148_bridge);
2623 }
2624
2625 static void __exit tsi148_exit(void)
2626 {
2627 pci_unregister_driver(&tsi148_driver);
2628 }
2629
2630 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2631 module_param(err_chk, bool, 0);
2632
2633 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2634 module_param(geoid, int, 0);
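/*
 * Usage sketch (parameter names as defined above; the exact module file name
 * depends on how the kernel build packages this driver):
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=3
 */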
2635
2636 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2637 MODULE_LICENSE("GPL");
2638
2639 module_init(tsi148_init);
2640 module_exit(tsi148_exit);