android_kernel_alcatel_ttab: drivers/vme/bridges/vme_tsi148.c
1 /*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32 #include <linux/byteorder/generic.h>
33 #include <linux/vme.h>
34
35 #include "../vme_bridge.h"
36 #include "vme_tsi148.h"
37
38 static int __init tsi148_init(void);
39 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
40 static void tsi148_remove(struct pci_dev *);
41 static void __exit tsi148_exit(void);
42
43
44 /* Module parameters */
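/*
 * err_chk: when true, master reads/writes consult the VME exception list
 *          after each transfer and report the first bus error seen in the
 *          accessed range (see tsi148_master_read/write below).
 * geoid:   when non-zero, overrides the geographic slot ID used for CR/CSR
 *          configuration later in the driver.
 */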
45 static bool err_chk;
46 static int geoid;
47
48 static const char driver_name[] = "vme_tsi148";
49
50 static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
51 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
52 { },
53 };
54
55 static struct pci_driver tsi148_driver = {
56 .name = driver_name,
57 .id_table = tsi148_ids,
58 .probe = tsi148_probe,
59 .remove = tsi148_remove,
60 };
61
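/*
 * The TSI148 presents 64-bit quantities (VME addresses, bounds, offsets) as
 * pairs of 32-bit registers.  reg_join() combines an upper/lower pair into
 * one 64-bit value and reg_split() does the reverse; for example,
 * reg_split(0x123456789ULL, &high, &low) yields high = 0x1, low = 0x23456789.
 */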
62 static void reg_join(unsigned int high, unsigned int low,
63 unsigned long long *variable)
64 {
65 *variable = (unsigned long long)high << 32;
66 *variable |= (unsigned long long)low;
67 }
68
69 static void reg_split(unsigned long long variable, unsigned int *high,
70 unsigned int *low)
71 {
72 *low = (unsigned int)variable & 0xFFFFFFFF;
73 *high = (unsigned int)(variable >> 32);
74 }
75
76 /*
77 * Wakes up DMA queue.
78 */
79 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
80 int channel_mask)
81 {
82 u32 serviced = 0;
83
84 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
85 wake_up(&bridge->dma_queue[0]);
86 serviced |= TSI148_LCSR_INTC_DMA0C;
87 }
88 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
89 wake_up(&bridge->dma_queue[1]);
90 serviced |= TSI148_LCSR_INTC_DMA1C;
91 }
92
93 return serviced;
94 }
95
96 /*
97 * Call the attached location monitor callbacks
98 */
99 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
100 {
101 int i;
102 u32 serviced = 0;
103
104 for (i = 0; i < 4; i++) {
105 if (stat & TSI148_LCSR_INTS_LMS[i]) {
106 /* We only enable interrupts if the callback is set */
107 bridge->lm_callback[i](i);
108 serviced |= TSI148_LCSR_INTC_LMC[i];
109 }
110 }
111
112 return serviced;
113 }
114
115 /*
116 * Report the values received in the mailbox registers.
117 *
118 * XXX This functionality is not exposed up through the API.
119 */
120 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
121 {
122 int i;
123 u32 val;
124 u32 serviced = 0;
125 struct tsi148_driver *bridge;
126
127 bridge = tsi148_bridge->driver_priv;
128
129 for (i = 0; i < 4; i++) {
130 if (stat & TSI148_LCSR_INTS_MBS[i]) {
131 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
132 dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
133 ": 0x%x\n", i, val);
134 serviced |= TSI148_LCSR_INTC_MBC[i];
135 }
136 }
137
138 return serviced;
139 }
140
141 /*
142 * Display error & status message when PERR (PCI) exception interrupt occurs.
143 */
144 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
145 {
146 struct tsi148_driver *bridge;
147
148 bridge = tsi148_bridge->driver_priv;
149
150 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
151 "attributes: %08x\n",
152 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
153 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
154 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
155
156 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
157 "completion reg: %08x\n",
158 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
159 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
160
161 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
162
163 return TSI148_LCSR_INTC_PERRC;
164 }
165
166 /*
167 * Save address and status when VME error interrupt occurs.
168 */
169 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
170 {
171 unsigned int error_addr_high, error_addr_low;
172 unsigned long long error_addr;
173 u32 error_attrib;
174 struct vme_bus_error *error;
175 struct tsi148_driver *bridge;
176
177 bridge = tsi148_bridge->driver_priv;
178
179 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
180 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
181 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
182
183 reg_join(error_addr_high, error_addr_low, &error_addr);
184
185 /* Check for exception register overflow (we have lost error data) */
186 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
187 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
188 "Occurred\n");
189 }
190
191 error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
192 if (error) {
193 error->address = error_addr;
194 error->attributes = error_attrib;
195 list_add_tail(&error->list, &tsi148_bridge->vme_errors);
196 } else {
197 dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
198 "VMEbus Error reporting\n");
199 dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
200 "0x%llx, attributes: %08x\n", error_addr, error_attrib);
201 }
202
203 /* Clear Status */
204 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
205
206 return TSI148_LCSR_INTC_VERRC;
207 }
208
209 /*
210 * Wake up IACK queue.
211 */
212 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
213 {
214 wake_up(&bridge->iack_queue);
215
216 return TSI148_LCSR_INTC_IACKC;
217 }
218
219 /*
220 * Call the VME bus interrupt callback if provided.
221 */
222 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
223 u32 stat)
224 {
225 int vec, i, serviced = 0;
226 struct tsi148_driver *bridge;
227
228 bridge = tsi148_bridge->driver_priv;
229
230 for (i = 7; i > 0; i--) {
231 if (stat & (1 << i)) {
232 /*
233 * Note: Even though the registers are defined as
234 * 32-bits in the spec, we only want to issue 8-bit
235 * IACK cycles on the bus, read from offset 3.
236 */
237 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
238
239 vme_irq_handler(tsi148_bridge, i, vec);
240
241 serviced |= (1 << i);
242 }
243 }
244
245 return serviced;
246 }
247
248 /*
249 * Top level interrupt handler. Calls the appropriate sub handler(s) and
250 * then clears the serviced interrupt status bits.
251 */
252 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
253 {
254 u32 stat, enable, serviced = 0;
255 struct vme_bridge *tsi148_bridge;
256 struct tsi148_driver *bridge;
257
258 tsi148_bridge = ptr;
259
260 bridge = tsi148_bridge->driver_priv;
261
262 /* Determine which interrupts are unmasked and set */
263 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
264 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
265
266 /* Only look at unmasked interrupts */
267 stat &= enable;
268
269 if (unlikely(!stat))
270 return IRQ_NONE;
271
272 /* Call subhandlers as appropriate */
273 /* DMA irqs */
274 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
275 serviced |= tsi148_DMA_irqhandler(bridge, stat);
276
277 /* Location monitor irqs */
278 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
279 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
280 serviced |= tsi148_LM_irqhandler(bridge, stat);
281
282 /* Mail box irqs */
283 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
284 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
285 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
286
287 /* PCI bus error */
288 if (stat & TSI148_LCSR_INTS_PERRS)
289 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
290
291 /* VME bus error */
292 if (stat & TSI148_LCSR_INTS_VERRS)
293 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
294
295 /* IACK irq */
296 if (stat & TSI148_LCSR_INTS_IACKS)
297 serviced |= tsi148_IACK_irqhandler(bridge);
298
299 /* VME bus irqs */
300 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
301 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
302 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
303 TSI148_LCSR_INTS_IRQ1S))
304 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
305
306 /* Clear serviced interrupts */
307 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
308
309 return IRQ_HANDLED;
310 }
311
312 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
313 {
314 int result;
315 unsigned int tmp;
316 struct pci_dev *pdev;
317 struct tsi148_driver *bridge;
318
319 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
320
321 bridge = tsi148_bridge->driver_priv;
322
323 /* Initialise list for VME bus errors */
324 INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
325
326 mutex_init(&tsi148_bridge->irq_mtx);
327
328 result = request_irq(pdev->irq,
329 tsi148_irqhandler,
330 IRQF_SHARED,
331 driver_name, tsi148_bridge);
332 if (result) {
333 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
334 "vector %02X\n", pdev->irq);
335 return result;
336 }
337
338 /* Enable and unmask interrupts */
339 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
340 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
341 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
342 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
343 TSI148_LCSR_INTEO_IACKEO;
344
345 /* This leaves the following interrupts masked.
346 * TSI148_LCSR_INTEO_VIEEO
347 * TSI148_LCSR_INTEO_SYSFLEO
348 * TSI148_LCSR_INTEO_ACFLEO
349 */
350
351 /* Don't enable Location Monitor interrupts here - they will be
352 * enabled when the location monitors are properly configured and
353 * a callback has been attached.
354 * TSI148_LCSR_INTEO_LM0EO
355 * TSI148_LCSR_INTEO_LM1EO
356 * TSI148_LCSR_INTEO_LM2EO
357 * TSI148_LCSR_INTEO_LM3EO
358 */
359
360 /* Don't enable VME interrupts until we add a handler, else the board
361 * will respond to it and we don't want that unless it knows how to
362 * properly deal with it.
363 * TSI148_LCSR_INTEO_IRQ7EO
364 * TSI148_LCSR_INTEO_IRQ6EO
365 * TSI148_LCSR_INTEO_IRQ5EO
366 * TSI148_LCSR_INTEO_IRQ4EO
367 * TSI148_LCSR_INTEO_IRQ3EO
368 * TSI148_LCSR_INTEO_IRQ2EO
369 * TSI148_LCSR_INTEO_IRQ1EO
370 */
371
372 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
373 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
374
375 return 0;
376 }
377
378 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
379 struct pci_dev *pdev)
380 {
381 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
382
383 /* Turn off interrupts */
384 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
385 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
386
387 /* Clear all interrupts */
388 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
389
390 /* Detach interrupt handler */
391 free_irq(pdev->irq, tsi148_bridge);
392 }
393
394 /*
395 * Check to see if an IACK has been received; return true (1) or false (0).
396 */
397 static int tsi148_iack_received(struct tsi148_driver *bridge)
398 {
399 u32 tmp;
400
401 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
402
403 if (tmp & TSI148_LCSR_VICR_IRQS)
404 return 0;
405 else
406 return 1;
407 }
408
409 /*
410 * Configure VME interrupt
411 */
412 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
413 int state, int sync)
414 {
415 struct pci_dev *pdev;
416 u32 tmp;
417 struct tsi148_driver *bridge;
418
419 bridge = tsi148_bridge->driver_priv;
420
421 /* We need to do the ordering differently for enabling and disabling */
422 if (state == 0) {
423 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
424 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
425 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
426
427 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
428 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
429 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
430
431 if (sync != 0) {
432 pdev = container_of(tsi148_bridge->parent,
433 struct pci_dev, dev);
434
435 synchronize_irq(pdev->irq);
436 }
437 } else {
438 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
439 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
440 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
441
442 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
443 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
444 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
445 }
446 }
447
448 /*
449 * Generate a VME bus interrupt at the requested level & vector. Wait for
450 * interrupt to be acked.
451 */
452 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
453 int statid)
454 {
455 u32 tmp;
456 struct tsi148_driver *bridge;
457
458 bridge = tsi148_bridge->driver_priv;
459
460 mutex_lock(&bridge->vme_int);
461
462 /* Read VICR register */
463 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
464
465 /* Set Status/ID */
466 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
467 (statid & TSI148_LCSR_VICR_STID_M);
468 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
469
470 /* Assert VMEbus IRQ */
471 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
472 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
473
474 /* XXX Consider implementing a timeout? */
475 wait_event_interruptible(bridge->iack_queue,
476 tsi148_iack_received(bridge));
477
478 mutex_unlock(&bridge->vme_int);
479
480 return 0;
481 }
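/*
 * Illustrative sketch only: device drivers do not call tsi148_irq_generate()
 * directly but go through the VME core, assumed here to export
 * vme_irq_request() and vme_irq_generate() as described in
 * Documentation/vme_api.txt.  "my_vdev" and the level/vector values are
 * hypothetical.
 *
 *	static void my_isr(int level, int statid, void *priv)
 *	{
 *		...handle the interrupt here...
 *	}
 *
 *	vme_irq_request(my_vdev, 3, 0x20, my_isr, NULL);
 *	vme_irq_generate(my_vdev, 3, 0x20);
 */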
482
483 /*
484 * Find the first error in this address range
485 */
486 static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
487 u32 aspace, unsigned long long address, size_t count)
488 {
489 struct list_head *err_pos;
490 struct vme_bus_error *vme_err, *valid = NULL;
491 unsigned long long bound;
492
493 bound = address + count;
494
495 /*
496 * XXX We are currently not looking at the address space when parsing
497 * for errors. This is because parsing the Address Modifier Codes
498 * is going to be quite resource intensive to do properly. We
499 * should be OK just looking at the addresses and this is certainly
500 * much better than what we had before.
501 */
502 err_pos = NULL;
503 /* Iterate through errors */
504 list_for_each(err_pos, &tsi148_bridge->vme_errors) {
505 vme_err = list_entry(err_pos, struct vme_bus_error, list);
506 if ((vme_err->address >= address) &&
507 (vme_err->address < bound)) {
508
509 valid = vme_err;
510 break;
511 }
512 }
513
514 return valid;
515 }
516
517 /*
518 * Clear errors in the provided address range.
519 */
520 static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
521 u32 aspace, unsigned long long address, size_t count)
522 {
523 struct list_head *err_pos, *temp;
524 struct vme_bus_error *vme_err;
525 unsigned long long bound;
526
527 bound = address + count;
528
529 /*
530 * XXX We are currently not looking at the address space when parsing
531 * for errors. This is because parsing the Address Modifier Codes
532 * is going to be quite resource intensive to do properly. We
533 * should be OK just looking at the addresses and this is certainly
534 * much better than what we had before.
535 */
536 err_pos = NULL;
537 /* Iterate through errors */
538 list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
539 vme_err = list_entry(err_pos, struct vme_bus_error, list);
540
541 if ((vme_err->address >= address) &&
542 (vme_err->address < bound)) {
543
544 list_del(err_pos);
545 kfree(vme_err);
546 }
547 }
548 }
549
550 /*
551 * Initialize a slave window with the requested attributes.
552 */
553 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
554 unsigned long long vme_base, unsigned long long size,
555 dma_addr_t pci_base, u32 aspace, u32 cycle)
556 {
557 unsigned int i, addr = 0, granularity = 0;
558 unsigned int temp_ctl = 0;
559 unsigned int vme_base_low, vme_base_high;
560 unsigned int vme_bound_low, vme_bound_high;
561 unsigned int pci_offset_low, pci_offset_high;
562 unsigned long long vme_bound, pci_offset;
563 struct vme_bridge *tsi148_bridge;
564 struct tsi148_driver *bridge;
565
566 tsi148_bridge = image->parent;
567 bridge = tsi148_bridge->driver_priv;
568
569 i = image->number;
570
571 switch (aspace) {
572 case VME_A16:
573 granularity = 0x10;
574 addr |= TSI148_LCSR_ITAT_AS_A16;
575 break;
576 case VME_A24:
577 granularity = 0x1000;
578 addr |= TSI148_LCSR_ITAT_AS_A24;
579 break;
580 case VME_A32:
581 granularity = 0x10000;
582 addr |= TSI148_LCSR_ITAT_AS_A32;
583 break;
584 case VME_A64:
585 granularity = 0x10000;
586 addr |= TSI148_LCSR_ITAT_AS_A64;
587 break;
588 case VME_CRCSR:
589 case VME_USER1:
590 case VME_USER2:
591 case VME_USER3:
592 case VME_USER4:
593 default:
594 dev_err(tsi148_bridge->parent, "Invalid address space\n");
595 return -EINVAL;
596 break;
597 }
598
599 /* Convert 64-bit variables to 2x 32-bit variables */
600 reg_split(vme_base, &vme_base_high, &vme_base_low);
601
602 /*
603 * Bound address is a valid address for the window, adjust
604 * accordingly
605 */
606 vme_bound = vme_base + size - granularity;
607 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
608 pci_offset = (unsigned long long)pci_base - vme_base;
609 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
610
611 if (vme_base_low & (granularity - 1)) {
612 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
613 return -EINVAL;
614 }
615 if (vme_bound_low & (granularity - 1)) {
616 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
617 return -EINVAL;
618 }
619 if (pci_offset_low & (granularity - 1)) {
620 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
621 "alignment\n");
622 return -EINVAL;
623 }
624
625 /* Disable while we are mucking around */
626 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
627 TSI148_LCSR_OFFSET_ITAT);
628 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
629 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
630 TSI148_LCSR_OFFSET_ITAT);
631
632 /* Setup mapping */
633 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
634 TSI148_LCSR_OFFSET_ITSAU);
635 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
636 TSI148_LCSR_OFFSET_ITSAL);
637 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITEAU);
639 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITEAL);
641 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
642 TSI148_LCSR_OFFSET_ITOFU);
643 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
644 TSI148_LCSR_OFFSET_ITOFL);
645
646 /* Setup 2eSST speeds */
647 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
648 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
649 case VME_2eSST160:
650 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
651 break;
652 case VME_2eSST267:
653 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
654 break;
655 case VME_2eSST320:
656 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
657 break;
658 }
659
660 /* Setup cycle types */
661 temp_ctl &= ~(0x1F << 7);
662 if (cycle & VME_BLT)
663 temp_ctl |= TSI148_LCSR_ITAT_BLT;
664 if (cycle & VME_MBLT)
665 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
666 if (cycle & VME_2eVME)
667 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
668 if (cycle & VME_2eSST)
669 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
670 if (cycle & VME_2eSSTB)
671 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
672
673 /* Setup address space */
674 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
675 temp_ctl |= addr;
676
677 temp_ctl &= ~0xF;
678 if (cycle & VME_SUPER)
679 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
680 if (cycle & VME_USER)
681 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
682 if (cycle & VME_PROG)
683 temp_ctl |= TSI148_LCSR_ITAT_PGM;
684 if (cycle & VME_DATA)
685 temp_ctl |= TSI148_LCSR_ITAT_DATA;
686
687 /* Write ctl reg without enable */
688 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
689 TSI148_LCSR_OFFSET_ITAT);
690
691 if (enabled)
692 temp_ctl |= TSI148_LCSR_ITAT_EN;
693
694 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
695 TSI148_LCSR_OFFSET_ITAT);
696
697 return 0;
698 }
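/*
 * Illustrative sketch only: how a VME device driver typically reaches
 * tsi148_slave_set() through the VME core.  vme_slave_request(),
 * vme_alloc_consistent() and vme_slave_set() are assumed from
 * Documentation/vme_api.txt; "my_vdev" and the window parameters are
 * hypothetical.
 *
 *	struct vme_resource *res;
 *	dma_addr_t bus;
 *	void *buf;
 *
 *	res = vme_slave_request(my_vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	buf = vme_alloc_consistent(res, 0x10000, &bus);
 *	vme_slave_set(res, 1, 0x400000, 0x10000, bus, VME_A24,
 *		      VME_SCT | VME_USER | VME_DATA);
 */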
699
700 /*
701 * Get slave window configuration.
702 */
703 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
704 unsigned long long *vme_base, unsigned long long *size,
705 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
706 {
707 unsigned int i, granularity = 0, ctl = 0;
708 unsigned int vme_base_low, vme_base_high;
709 unsigned int vme_bound_low, vme_bound_high;
710 unsigned int pci_offset_low, pci_offset_high;
711 unsigned long long vme_bound, pci_offset;
712 struct tsi148_driver *bridge;
713
714 bridge = image->parent->driver_priv;
715
716 i = image->number;
717
718 /* Read registers */
719 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
720 TSI148_LCSR_OFFSET_ITAT);
721
722 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
723 TSI148_LCSR_OFFSET_ITSAU);
724 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
725 TSI148_LCSR_OFFSET_ITSAL);
726 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
727 TSI148_LCSR_OFFSET_ITEAU);
728 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
729 TSI148_LCSR_OFFSET_ITEAL);
730 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
731 TSI148_LCSR_OFFSET_ITOFU);
732 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
733 TSI148_LCSR_OFFSET_ITOFL);
734
735 /* Convert 2x 32-bit variables to a 64-bit variable */
736 reg_join(vme_base_high, vme_base_low, vme_base);
737 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
738 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
739
740 *pci_base = (dma_addr_t)*vme_base + pci_offset;
741
742 *enabled = 0;
743 *aspace = 0;
744 *cycle = 0;
745
746 if (ctl & TSI148_LCSR_ITAT_EN)
747 *enabled = 1;
748
749 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
750 granularity = 0x10;
751 *aspace |= VME_A16;
752 }
753 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
754 granularity = 0x1000;
755 *aspace |= VME_A24;
756 }
757 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
758 granularity = 0x10000;
759 *aspace |= VME_A32;
760 }
761 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
762 granularity = 0x10000;
763 *aspace |= VME_A64;
764 }
765
766 /* Need granularity before we set the size */
767 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
768
769
770 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
771 *cycle |= VME_2eSST160;
772 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
773 *cycle |= VME_2eSST267;
774 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
775 *cycle |= VME_2eSST320;
776
777 if (ctl & TSI148_LCSR_ITAT_BLT)
778 *cycle |= VME_BLT;
779 if (ctl & TSI148_LCSR_ITAT_MBLT)
780 *cycle |= VME_MBLT;
781 if (ctl & TSI148_LCSR_ITAT_2eVME)
782 *cycle |= VME_2eVME;
783 if (ctl & TSI148_LCSR_ITAT_2eSST)
784 *cycle |= VME_2eSST;
785 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
786 *cycle |= VME_2eSSTB;
787
788 if (ctl & TSI148_LCSR_ITAT_SUPR)
789 *cycle |= VME_SUPER;
790 if (ctl & TSI148_LCSR_ITAT_NPRIV)
791 *cycle |= VME_USER;
792 if (ctl & TSI148_LCSR_ITAT_PGM)
793 *cycle |= VME_PROG;
794 if (ctl & TSI148_LCSR_ITAT_DATA)
795 *cycle |= VME_DATA;
796
797 return 0;
798 }
799
800 /*
801 * Allocate and map PCI Resource
802 */
803 static int tsi148_alloc_resource(struct vme_master_resource *image,
804 unsigned long long size)
805 {
806 unsigned long long existing_size;
807 int retval = 0;
808 struct pci_dev *pdev;
809 struct vme_bridge *tsi148_bridge;
810
811 tsi148_bridge = image->parent;
812
813 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
814
815 existing_size = (unsigned long long)(image->bus_resource.end -
816 image->bus_resource.start);
817
818 /* If the existing size is OK, return */
819 if ((size != 0) && (existing_size == (size - 1)))
820 return 0;
821
822 if (existing_size != 0) {
823 iounmap(image->kern_base);
824 image->kern_base = NULL;
825 kfree(image->bus_resource.name);
826 release_resource(&image->bus_resource);
827 memset(&image->bus_resource, 0, sizeof(struct resource));
828 }
829
830 /* Exit here if size is zero */
831 if (size == 0)
832 return 0;
833
834 if (image->bus_resource.name == NULL) {
835 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
836 if (image->bus_resource.name == NULL) {
837 dev_err(tsi148_bridge->parent, "Unable to allocate "
838 "memory for resource name\n");
839 retval = -ENOMEM;
840 goto err_name;
841 }
842 }
843
844 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
845 image->number);
846
847 image->bus_resource.start = 0;
848 image->bus_resource.end = (unsigned long)size;
849 image->bus_resource.flags = IORESOURCE_MEM;
850
851 retval = pci_bus_alloc_resource(pdev->bus,
852 &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
853 0, NULL, NULL);
854 if (retval) {
855 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
856 "resource for window %d size 0x%lx start 0x%lx\n",
857 image->number, (unsigned long)size,
858 (unsigned long)image->bus_resource.start);
859 goto err_resource;
860 }
861
862 image->kern_base = ioremap_nocache(
863 image->bus_resource.start, size);
864 if (image->kern_base == NULL) {
865 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
866 retval = -ENOMEM;
867 goto err_remap;
868 }
869
870 return 0;
871
872 err_remap:
873 release_resource(&image->bus_resource);
874 err_resource:
875 kfree(image->bus_resource.name);
876 memset(&image->bus_resource, 0, sizeof(struct resource));
877 err_name:
878 return retval;
879 }
880
881 /*
882 * Free and unmap PCI Resource
883 */
884 static void tsi148_free_resource(struct vme_master_resource *image)
885 {
886 iounmap(image->kern_base);
887 image->kern_base = NULL;
888 release_resource(&image->bus_resource);
889 kfree(image->bus_resource.name);
890 memset(&image->bus_resource, 0, sizeof(struct resource));
891 }
892
893 /*
894 * Set the attributes of an outbound window.
895 */
896 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
897 unsigned long long vme_base, unsigned long long size, u32 aspace,
898 u32 cycle, u32 dwidth)
899 {
900 int retval = 0;
901 unsigned int i;
902 unsigned int temp_ctl = 0;
903 unsigned int pci_base_low, pci_base_high;
904 unsigned int pci_bound_low, pci_bound_high;
905 unsigned int vme_offset_low, vme_offset_high;
906 unsigned long long pci_bound, vme_offset, pci_base;
907 struct vme_bridge *tsi148_bridge;
908 struct tsi148_driver *bridge;
909
910 tsi148_bridge = image->parent;
911
912 bridge = tsi148_bridge->driver_priv;
913
914 /* Verify input data */
915 if (vme_base & 0xFFFF) {
916 dev_err(tsi148_bridge->parent, "Invalid VME Window "
917 "alignment\n");
918 retval = -EINVAL;
919 goto err_window;
920 }
921
922 if ((size == 0) && (enabled != 0)) {
923 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
924 "enabled windows\n");
925 retval = -EINVAL;
926 goto err_window;
927 }
928
929 spin_lock(&image->lock);
930
931 /* Let's allocate the resource here rather than further up the stack as
932 * it avoids pushing loads of bus dependent stuff up the stack. If size
933 * is zero, any existing resource will be freed.
934 */
935 retval = tsi148_alloc_resource(image, size);
936 if (retval) {
937 spin_unlock(&image->lock);
938 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
939 "resource\n");
940 goto err_res;
941 }
942
943 if (size == 0) {
944 pci_base = 0;
945 pci_bound = 0;
946 vme_offset = 0;
947 } else {
948 pci_base = (unsigned long long)image->bus_resource.start;
949
950 /*
951 * Bound address is a valid address for the window, adjust
952 * according to window granularity.
953 */
954 pci_bound = pci_base + (size - 0x10000);
955 vme_offset = vme_base - pci_base;
956 }
957
958 /* Convert 64-bit variables to 2x 32-bit variables */
959 reg_split(pci_base, &pci_base_high, &pci_base_low);
960 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
961 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
962
963 if (pci_base_low & 0xFFFF) {
964 spin_unlock(&image->lock);
965 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
966 retval = -EINVAL;
967 goto err_gran;
968 }
969 if (pci_bound_low & 0xFFFF) {
970 spin_unlock(&image->lock);
971 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
972 retval = -EINVAL;
973 goto err_gran;
974 }
975 if (vme_offset_low & 0xFFFF) {
976 spin_unlock(&image->lock);
977 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
978 "alignment\n");
979 retval = -EINVAL;
980 goto err_gran;
981 }
982
983 i = image->number;
984
985 /* Disable while we are mucking around */
986 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
987 TSI148_LCSR_OFFSET_OTAT);
988 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
989 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
990 TSI148_LCSR_OFFSET_OTAT);
991
992 /* Setup 2eSST speeds */
993 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
994 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
995 case VME_2eSST160:
996 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
997 break;
998 case VME_2eSST267:
999 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1000 break;
1001 case VME_2eSST320:
1002 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1003 break;
1004 }
1005
1006 /* Setup cycle types */
1007 if (cycle & VME_BLT) {
1008 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1009 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1010 }
1011 if (cycle & VME_MBLT) {
1012 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1013 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1014 }
1015 if (cycle & VME_2eVME) {
1016 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1017 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1018 }
1019 if (cycle & VME_2eSST) {
1020 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1021 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1022 }
1023 if (cycle & VME_2eSSTB) {
1024 dev_warn(tsi148_bridge->parent, "Currently not setting "
1025 "Broadcast Select Registers\n");
1026 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1027 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1028 }
1029
1030 /* Setup data width */
1031 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1032 switch (dwidth) {
1033 case VME_D16:
1034 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1035 break;
1036 case VME_D32:
1037 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1038 break;
1039 default:
1040 spin_unlock(&image->lock);
1041 dev_err(tsi148_bridge->parent, "Invalid data width\n");
1042 retval = -EINVAL;
1043 goto err_dwidth;
1044 }
1045
1046 /* Setup address space */
1047 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1048 switch (aspace) {
1049 case VME_A16:
1050 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1051 break;
1052 case VME_A24:
1053 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1054 break;
1055 case VME_A32:
1056 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1057 break;
1058 case VME_A64:
1059 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1060 break;
1061 case VME_CRCSR:
1062 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1063 break;
1064 case VME_USER1:
1065 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1066 break;
1067 case VME_USER2:
1068 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1069 break;
1070 case VME_USER3:
1071 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1072 break;
1073 case VME_USER4:
1074 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1075 break;
1076 default:
1077 spin_unlock(&image->lock);
1078 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1079 retval = -EINVAL;
1080 goto err_aspace;
1081 break;
1082 }
1083
1084 temp_ctl &= ~(3<<4);
1085 if (cycle & VME_SUPER)
1086 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1087 if (cycle & VME_PROG)
1088 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1089
1090 /* Setup mapping */
1091 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1092 TSI148_LCSR_OFFSET_OTSAU);
1093 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1094 TSI148_LCSR_OFFSET_OTSAL);
1095 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1096 TSI148_LCSR_OFFSET_OTEAU);
1097 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1098 TSI148_LCSR_OFFSET_OTEAL);
1099 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1100 TSI148_LCSR_OFFSET_OTOFU);
1101 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1102 TSI148_LCSR_OFFSET_OTOFL);
1103
1104 /* Write ctl reg without enable */
1105 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1106 TSI148_LCSR_OFFSET_OTAT);
1107
1108 if (enabled)
1109 temp_ctl |= TSI148_LCSR_OTAT_EN;
1110
1111 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1112 TSI148_LCSR_OFFSET_OTAT);
1113
1114 spin_unlock(&image->lock);
1115 return 0;
1116
1117 err_aspace:
1118 err_dwidth:
1119 err_gran:
1120 tsi148_free_resource(image);
1121 err_res:
1122 err_window:
1123 return retval;
1124
1125 }
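/*
 * Illustrative sketch only: the matching caller-side sequence through the
 * VME core.  vme_master_request(), vme_master_set() and vme_master_read()
 * are assumed from Documentation/vme_api.txt; "my_vdev" and the window
 * parameters are hypothetical (note the 64K alignment required above).
 *
 *	struct vme_resource *res;
 *	u8 data[256];
 *
 *	res = vme_master_request(my_vdev, VME_A32,
 *				 VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	vme_master_set(res, 1, 0x10000000, 0x10000, VME_A32,
 *		       VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	vme_master_read(res, data, sizeof(data), 0);
 */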
1126
1127 /*
1128 * Get the attributes of an outbound window.
1129 *
1130 * XXX Not parsing prefetch information.
1131 */
1132 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1133 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1134 u32 *cycle, u32 *dwidth)
1135 {
1136 unsigned int i, ctl;
1137 unsigned int pci_base_low, pci_base_high;
1138 unsigned int pci_bound_low, pci_bound_high;
1139 unsigned int vme_offset_low, vme_offset_high;
1140
1141 unsigned long long pci_base, pci_bound, vme_offset;
1142 struct tsi148_driver *bridge;
1143
1144 bridge = image->parent->driver_priv;
1145
1146 i = image->number;
1147
1148 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1149 TSI148_LCSR_OFFSET_OTAT);
1150
1151 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1152 TSI148_LCSR_OFFSET_OTSAU);
1153 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1154 TSI148_LCSR_OFFSET_OTSAL);
1155 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1156 TSI148_LCSR_OFFSET_OTEAU);
1157 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1158 TSI148_LCSR_OFFSET_OTEAL);
1159 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1160 TSI148_LCSR_OFFSET_OTOFU);
1161 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1162 TSI148_LCSR_OFFSET_OTOFL);
1163
1164 /* Convert 2x 32-bit variables to a 64-bit variable */
1165 reg_join(pci_base_high, pci_base_low, &pci_base);
1166 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1167 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1168
1169 *vme_base = pci_base + vme_offset;
1170 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1171
1172 *enabled = 0;
1173 *aspace = 0;
1174 *cycle = 0;
1175 *dwidth = 0;
1176
1177 if (ctl & TSI148_LCSR_OTAT_EN)
1178 *enabled = 1;
1179
1180 /* Setup address space */
1181 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1182 *aspace |= VME_A16;
1183 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1184 *aspace |= VME_A24;
1185 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1186 *aspace |= VME_A32;
1187 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1188 *aspace |= VME_A64;
1189 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1190 *aspace |= VME_CRCSR;
1191 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1192 *aspace |= VME_USER1;
1193 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1194 *aspace |= VME_USER2;
1195 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1196 *aspace |= VME_USER3;
1197 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1198 *aspace |= VME_USER4;
1199
1200 /* Setup 2eSST speeds */
1201 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1202 *cycle |= VME_2eSST160;
1203 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1204 *cycle |= VME_2eSST267;
1205 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1206 *cycle |= VME_2eSST320;
1207
1208 /* Setup cycle types */
1209 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1210 *cycle |= VME_SCT;
1211 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1212 *cycle |= VME_BLT;
1213 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1214 *cycle |= VME_MBLT;
1215 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1216 *cycle |= VME_2eVME;
1217 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1218 *cycle |= VME_2eSST;
1219 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1220 *cycle |= VME_2eSSTB;
1221
1222 if (ctl & TSI148_LCSR_OTAT_SUP)
1223 *cycle |= VME_SUPER;
1224 else
1225 *cycle |= VME_USER;
1226
1227 if (ctl & TSI148_LCSR_OTAT_PGM)
1228 *cycle |= VME_PROG;
1229 else
1230 *cycle |= VME_DATA;
1231
1232 /* Setup data width */
1233 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1234 *dwidth = VME_D16;
1235 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1236 *dwidth = VME_D32;
1237
1238 return 0;
1239 }
1240
1241
1242 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1243 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1244 u32 *cycle, u32 *dwidth)
1245 {
1246 int retval;
1247
1248 spin_lock(&image->lock);
1249
1250 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1251 cycle, dwidth);
1252
1253 spin_unlock(&image->lock);
1254
1255 return retval;
1256 }
1257
1258 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1259 size_t count, loff_t offset)
1260 {
1261 int retval, enabled;
1262 unsigned long long vme_base, size;
1263 u32 aspace, cycle, dwidth;
1264 struct vme_bus_error *vme_err = NULL;
1265 struct vme_bridge *tsi148_bridge;
1266 void *addr = image->kern_base + offset;
1267 unsigned int done = 0;
1268 unsigned int count32;
1269
1270 tsi148_bridge = image->parent;
1271
1272 spin_lock(&image->lock);
1273
1274 /* The following code handles VME address alignment. We cannot use
1275 * memcpy_xxx directly here because it may cut small data transfers into
1276 * 8-bit cycles, thus making D16 cycles impossible.
1277 * On the other hand, the bridge itself assures that the maximum data
1278 * cycle configured for the transfer is used and splits it
1279 * automatically for non-aligned addresses, so we don't want the
1280 * overhead of needlessly forcing small transfers for the entire cycle.
1281 */
1282 if ((uintptr_t)addr & 0x1) {
1283 *(u8 *)buf = ioread8(addr);
1284 done += 1;
1285 if (done == count)
1286 goto out;
1287 }
1288 if ((uintptr_t)addr & 0x2) {
1289 if ((count - done) < 2) {
1290 *(u8 *)(buf + done) = ioread8(addr + done);
1291 done += 1;
1292 goto out;
1293 } else {
1294 *(u16 *)(buf + done) = ioread16(addr + done);
1295 done += 2;
1296 }
1297 }
1298
1299 count32 = (count - done) & ~0x3;
1300 if (count32 > 0) {
1301 memcpy_fromio(buf + done, addr + done, count32);
1302 done += count32;
1303 }
1304
1305 if ((count - done) & 0x2) {
1306 *(u16 *)(buf + done) = ioread16(addr + done);
1307 done += 2;
1308 }
1309 if ((count - done) & 0x1) {
1310 *(u8 *)(buf + done) = ioread8(addr + done);
1311 done += 1;
1312 }
1313
1314 out:
1315 retval = count;
1316
1317 if (!err_chk)
1318 goto skip_chk;
1319
1320 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1321 &dwidth);
1322
1323 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1324 count);
1325 if (vme_err != NULL) {
1326 dev_err(image->parent->parent, "First VME read error detected "
1327 "an at address 0x%llx\n", vme_err->address);
1328 retval = vme_err->address - (vme_base + offset);
1329 /* Clear down saved errors in this address range */
1330 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1331 count);
1332 }
1333
1334 skip_chk:
1335 spin_unlock(&image->lock);
1336
1337 return retval;
1338 }
1339
1340
1341 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1342 size_t count, loff_t offset)
1343 {
1344 int retval = 0, enabled;
1345 unsigned long long vme_base, size;
1346 u32 aspace, cycle, dwidth;
1347 void *addr = image->kern_base + offset;
1348 unsigned int done = 0;
1349 unsigned int count32;
1350
1351 struct vme_bus_error *vme_err = NULL;
1352 struct vme_bridge *tsi148_bridge;
1353 struct tsi148_driver *bridge;
1354
1355 tsi148_bridge = image->parent;
1356
1357 bridge = tsi148_bridge->driver_priv;
1358
1359 spin_lock(&image->lock);
1360
1361 /* Here we apply the same strategy as in the master_read
1362 * function in order to assure D16 cycles when required.
1363 */
1364 if ((uintptr_t)addr & 0x1) {
1365 iowrite8(*(u8 *)buf, addr);
1366 done += 1;
1367 if (done == count)
1368 goto out;
1369 }
1370 if ((uintptr_t)addr & 0x2) {
1371 if ((count - done) < 2) {
1372 iowrite8(*(u8 *)(buf + done), addr + done);
1373 done += 1;
1374 goto out;
1375 } else {
1376 iowrite16(*(u16 *)(buf + done), addr + done);
1377 done += 2;
1378 }
1379 }
1380
1381 count32 = (count - done) & ~0x3;
1382 if (count32 > 0) {
1383 memcpy_toio(addr + done, buf + done, count32);
1384 done += count32;
1385 }
1386
1387 if ((count - done) & 0x2) {
1388 iowrite16(*(u16 *)(buf + done), addr + done);
1389 done += 2;
1390 }
1391 if ((count - done) & 0x1) {
1392 iowrite8(*(u8 *)(buf + done), addr + done);
1393 done += 1;
1394 }
1395
1396 out:
1397 retval = count;
1398
1399 /*
1400 * Writes are posted. We need to do a read on the VME bus to flush out
1401 * all of the writes before we check for errors. We can't guarantee
1402 * that reading the data we have just written is safe. It is believed
1403 * that there isn't any read, write re-ordering, so we can read any
1404 * location in VME space, so lets read the Device ID from the tsi148's
1405 * own registers as mapped into CR/CSR space.
1406 *
1407 * We check for saved errors in the written address range/space.
1408 */
1409
1410 if (!err_chk)
1411 goto skip_chk;
1412
1413 /*
1414 * Get window info first, to maximise the time that the buffers may
1415 * flush on their own
1416 */
1417 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1418 &dwidth);
1419
1420 ioread16(bridge->flush_image->kern_base + 0x7F000);
1421
1422 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1423 count);
1424 if (vme_err != NULL) {
1425 dev_warn(tsi148_bridge->parent, "First VME write error detected"
1426 " an at address 0x%llx\n", vme_err->address);
1427 retval = vme_err->address - (vme_base + offset);
1428 /* Clear down saved errors in this address range */
1429 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1430 count);
1431 }
1432
1433 skip_chk:
1434 spin_unlock(&image->lock);
1435
1436 return retval;
1437 }
1438
1439 /*
1440 * Perform an RMW cycle on the VME bus.
1441 *
1442 * Requires a previously configured master window; returns the final value.
1443 */
1444 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1445 unsigned int mask, unsigned int compare, unsigned int swap,
1446 loff_t offset)
1447 {
1448 unsigned long long pci_addr;
1449 unsigned int pci_addr_high, pci_addr_low;
1450 u32 tmp, result;
1451 int i;
1452 struct tsi148_driver *bridge;
1453
1454 bridge = image->parent->driver_priv;
1455
1456 /* Find the PCI address that maps to the desired VME address */
1457 i = image->number;
1458
1459 /* Locking as we can only do one of these at a time */
1460 mutex_lock(&bridge->vme_rmw);
1461
1462 /* Lock image */
1463 spin_lock(&image->lock);
1464
1465 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1466 TSI148_LCSR_OFFSET_OTSAU);
1467 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1468 TSI148_LCSR_OFFSET_OTSAL);
1469
1470 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1471 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1472
1473 /* Configure registers */
1474 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1475 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1476 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1477 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1478 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1479
1480 /* Enable RMW */
1481 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1482 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1483 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1484
1485 /* Kick process off with a read to the required address. */
1486 result = ioread32be(image->kern_base + offset);
1487
1488 /* Disable RMW */
1489 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1490 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1491 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1492
1493 spin_unlock(&image->lock);
1494
1495 mutex_unlock(&bridge->vme_rmw);
1496
1497 return result;
1498 }
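/*
 * Illustrative sketch only: through the VME core wrapper (vme_master_rmw(),
 * assumed from Documentation/vme_api.txt) the bitwise compare-and-swap above
 * can implement a lock word shared over the bus.  "res" is a previously
 * configured master window; with mask 0xffffffff, compare 0 and swap 1 the
 * word at offset 0 is set to 1 only if it currently reads 0, and the old
 * contents are returned.
 *
 *	old = vme_master_rmw(res, 0xffffffff, 0, 1, 0);
 *	if (old == 0)
 *		...lock acquired...
 */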
1499
1500 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1501 u32 aspace, u32 cycle, u32 dwidth)
1502 {
1503 u32 val;
1504
1505 val = be32_to_cpu(*attr);
1506
1507 /* Setup 2eSST speeds */
1508 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1509 case VME_2eSST160:
1510 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1511 break;
1512 case VME_2eSST267:
1513 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1514 break;
1515 case VME_2eSST320:
1516 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1517 break;
1518 }
1519
1520 /* Setup cycle types */
1521 if (cycle & VME_SCT)
1522 val |= TSI148_LCSR_DSAT_TM_SCT;
1523
1524 if (cycle & VME_BLT)
1525 val |= TSI148_LCSR_DSAT_TM_BLT;
1526
1527 if (cycle & VME_MBLT)
1528 val |= TSI148_LCSR_DSAT_TM_MBLT;
1529
1530 if (cycle & VME_2eVME)
1531 val |= TSI148_LCSR_DSAT_TM_2eVME;
1532
1533 if (cycle & VME_2eSST)
1534 val |= TSI148_LCSR_DSAT_TM_2eSST;
1535
1536 if (cycle & VME_2eSSTB) {
1537 dev_err(dev, "Currently not setting Broadcast Select "
1538 "Registers\n");
1539 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1540 }
1541
1542 /* Setup data width */
1543 switch (dwidth) {
1544 case VME_D16:
1545 val |= TSI148_LCSR_DSAT_DBW_16;
1546 break;
1547 case VME_D32:
1548 val |= TSI148_LCSR_DSAT_DBW_32;
1549 break;
1550 default:
1551 dev_err(dev, "Invalid data width\n");
1552 return -EINVAL;
1553 }
1554
1555 /* Setup address space */
1556 switch (aspace) {
1557 case VME_A16:
1558 val |= TSI148_LCSR_DSAT_AMODE_A16;
1559 break;
1560 case VME_A24:
1561 val |= TSI148_LCSR_DSAT_AMODE_A24;
1562 break;
1563 case VME_A32:
1564 val |= TSI148_LCSR_DSAT_AMODE_A32;
1565 break;
1566 case VME_A64:
1567 val |= TSI148_LCSR_DSAT_AMODE_A64;
1568 break;
1569 case VME_CRCSR:
1570 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1571 break;
1572 case VME_USER1:
1573 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1574 break;
1575 case VME_USER2:
1576 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1577 break;
1578 case VME_USER3:
1579 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1580 break;
1581 case VME_USER4:
1582 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1583 break;
1584 default:
1585 dev_err(dev, "Invalid address space\n");
1586 return -EINVAL;
1587 break;
1588 }
1589
1590 if (cycle & VME_SUPER)
1591 val |= TSI148_LCSR_DSAT_SUP;
1592 if (cycle & VME_PROG)
1593 val |= TSI148_LCSR_DSAT_PGM;
1594
1595 *attr = cpu_to_be32(val);
1596
1597 return 0;
1598 }
1599
1600 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1601 u32 aspace, u32 cycle, u32 dwidth)
1602 {
1603 u32 val;
1604
1605 val = be32_to_cpu(*attr);
1606
1607 /* Setup 2eSST speeds */
1608 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1609 case VME_2eSST160:
1610 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1611 break;
1612 case VME_2eSST267:
1613 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1614 break;
1615 case VME_2eSST320:
1616 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1617 break;
1618 }
1619
1620 /* Setup cycle types */
1621 if (cycle & VME_SCT)
1622 val |= TSI148_LCSR_DDAT_TM_SCT;
1623
1624 if (cycle & VME_BLT)
1625 val |= TSI148_LCSR_DDAT_TM_BLT;
1626
1627 if (cycle & VME_MBLT)
1628 val |= TSI148_LCSR_DDAT_TM_MBLT;
1629
1630 if (cycle & VME_2eVME)
1631 val |= TSI148_LCSR_DDAT_TM_2eVME;
1632
1633 if (cycle & VME_2eSST)
1634 val |= TSI148_LCSR_DDAT_TM_2eSST;
1635
1636 if (cycle & VME_2eSSTB) {
1637 dev_err(dev, "Currently not setting Broadcast Select "
1638 "Registers\n");
1639 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1640 }
1641
1642 /* Setup data width */
1643 switch (dwidth) {
1644 case VME_D16:
1645 val |= TSI148_LCSR_DDAT_DBW_16;
1646 break;
1647 case VME_D32:
1648 val |= TSI148_LCSR_DDAT_DBW_32;
1649 break;
1650 default:
1651 dev_err(dev, "Invalid data width\n");
1652 return -EINVAL;
1653 }
1654
1655 /* Setup address space */
1656 switch (aspace) {
1657 case VME_A16:
1658 val |= TSI148_LCSR_DDAT_AMODE_A16;
1659 break;
1660 case VME_A24:
1661 val |= TSI148_LCSR_DDAT_AMODE_A24;
1662 break;
1663 case VME_A32:
1664 val |= TSI148_LCSR_DDAT_AMODE_A32;
1665 break;
1666 case VME_A64:
1667 val |= TSI148_LCSR_DDAT_AMODE_A64;
1668 break;
1669 case VME_CRCSR:
1670 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1671 break;
1672 case VME_USER1:
1673 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1674 break;
1675 case VME_USER2:
1676 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1677 break;
1678 case VME_USER3:
1679 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1680 break;
1681 case VME_USER4:
1682 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1683 break;
1684 default:
1685 dev_err(dev, "Invalid address space\n");
1686 return -EINVAL;
1687 break;
1688 }
1689
1690 if (cycle & VME_SUPER)
1691 val |= TSI148_LCSR_DDAT_SUP;
1692 if (cycle & VME_PROG)
1693 val |= TSI148_LCSR_DDAT_PGM;
1694
1695 *attr = cpu_to_be32(val);
1696
1697 return 0;
1698 }
1699
1700 /*
1701 * Add a linked-list descriptor to the list
1702 *
1703 * Note: DMA engine expects the DMA descriptor to be big endian.
1704 */
1705 static int tsi148_dma_list_add(struct vme_dma_list *list,
1706 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1707 {
1708 struct tsi148_dma_entry *entry, *prev;
1709 u32 address_high, address_low, val;
1710 struct vme_dma_pattern *pattern_attr;
1711 struct vme_dma_pci *pci_attr;
1712 struct vme_dma_vme *vme_attr;
1713 int retval = 0;
1714 struct vme_bridge *tsi148_bridge;
1715
1716 tsi148_bridge = list->parent->parent;
1717
1718 /* Descriptor must be aligned on 64-bit boundaries */
1719 entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1720 if (entry == NULL) {
1721 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1722 "dma resource structure\n");
1723 retval = -ENOMEM;
1724 goto err_mem;
1725 }
1726
1727 /* Test descriptor alignment */
1728 if ((unsigned long)&entry->descriptor & 0x7) {
1729 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1730 "byte boundary as required: %p\n",
1731 &entry->descriptor);
1732 retval = -EINVAL;
1733 goto err_align;
1734 }
1735
1736 /* Given we are going to fill out the structure, we probably don't
1737 * need to zero it, but better safe than sorry for now.
1738 */
1739 memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1740
1741 /* Fill out source part */
1742 switch (src->type) {
1743 case VME_DMA_PATTERN:
1744 pattern_attr = src->private;
1745
1746 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1747
1748 val = TSI148_LCSR_DSAT_TYP_PAT;
1749
1750 /* Default behaviour is 32 bit pattern */
1751 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1752 val |= TSI148_LCSR_DSAT_PSZ;
1753
1754 /* It seems that the default behaviour is to increment */
1755 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1756 val |= TSI148_LCSR_DSAT_NIN;
1757 entry->descriptor.dsat = cpu_to_be32(val);
1758 break;
1759 case VME_DMA_PCI:
1760 pci_attr = src->private;
1761
1762 reg_split((unsigned long long)pci_attr->address, &address_high,
1763 &address_low);
1764 entry->descriptor.dsau = cpu_to_be32(address_high);
1765 entry->descriptor.dsal = cpu_to_be32(address_low);
1766 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1767 break;
1768 case VME_DMA_VME:
1769 vme_attr = src->private;
1770
1771 reg_split((unsigned long long)vme_attr->address, &address_high,
1772 &address_low);
1773 entry->descriptor.dsau = cpu_to_be32(address_high);
1774 entry->descriptor.dsal = cpu_to_be32(address_low);
1775 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1776
1777 retval = tsi148_dma_set_vme_src_attributes(
1778 tsi148_bridge->parent, &entry->descriptor.dsat,
1779 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1780 if (retval < 0)
1781 goto err_source;
1782 break;
1783 default:
1784 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1785 retval = -EINVAL;
1786 goto err_source;
1787 break;
1788 }
1789
1790 /* Assume last link - this will be over-written by adding another */
1791 entry->descriptor.dnlau = cpu_to_be32(0);
1792 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1793
1794 /* Fill out destination part */
1795 switch (dest->type) {
1796 case VME_DMA_PCI:
1797 pci_attr = dest->private;
1798
1799 reg_split((unsigned long long)pci_attr->address, &address_high,
1800 &address_low);
1801 entry->descriptor.ddau = cpu_to_be32(address_high);
1802 entry->descriptor.ddal = cpu_to_be32(address_low);
1803 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1804 break;
1805 case VME_DMA_VME:
1806 vme_attr = dest->private;
1807
1808 reg_split((unsigned long long)vme_attr->address, &address_high,
1809 &address_low);
1810 entry->descriptor.ddau = cpu_to_be32(address_high);
1811 entry->descriptor.ddal = cpu_to_be32(address_low);
1812 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1813
1814 retval = tsi148_dma_set_vme_dest_attributes(
1815 tsi148_bridge->parent, &entry->descriptor.ddat,
1816 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1817 if (retval < 0)
1818 goto err_dest;
1819 break;
1820 default:
1821 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1822 retval = -EINVAL;
1823 goto err_dest;
1824 break;
1825 }
1826
1827 /* Fill out count */
1828 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1829
1830 /* Add to list */
1831 list_add_tail(&entry->list, &list->entries);
1832
1833 /* Fill out previous descriptors "Next Address" */
1834 if (entry->list.prev != &list->entries) {
1835 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1836 list);
1837 /* We need the bus address for the pointer */
1838 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1839 &entry->descriptor,
1840 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1841
1842 reg_split((unsigned long long)entry->dma_handle, &address_high,
1843 &address_low);
1844 entry->descriptor.dnlau = cpu_to_be32(address_high);
1845 entry->descriptor.dnlal = cpu_to_be32(address_low);
1846
1847 }
1848
1849 return 0;
1850
1851 err_dest:
1852 err_source:
1853 err_align:
1854 kfree(entry);
1855 err_mem:
1856 return retval;
1857 }
1858
1859 /*
1860 * Check to see if the provided DMA channel is busy.
1861 */
1862 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1863 {
1864 u32 tmp;
1865 struct tsi148_driver *bridge;
1866
1867 bridge = tsi148_bridge->driver_priv;
1868
1869 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1870 TSI148_LCSR_OFFSET_DSTA);
1871
1872 if (tmp & TSI148_LCSR_DSTA_BSY)
1873 return 0;
1874 else
1875 return 1;
1876
1877 }
1878
1879 /*
1880 * Execute a previously generated linked list
1881 *
1882 * XXX Need to provide control register configuration.
1883 */
1884 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1885 {
1886 struct vme_dma_resource *ctrlr;
1887 int channel, retval = 0;
1888 struct tsi148_dma_entry *entry;
1889 u32 bus_addr_high, bus_addr_low;
1890 u32 val, dctlreg = 0;
1891 struct vme_bridge *tsi148_bridge;
1892 struct tsi148_driver *bridge;
1893
1894 ctrlr = list->parent;
1895
1896 tsi148_bridge = ctrlr->parent;
1897
1898 bridge = tsi148_bridge->driver_priv;
1899
1900 mutex_lock(&ctrlr->mtx);
1901
1902 channel = ctrlr->number;
1903
1904 if (!list_empty(&ctrlr->running)) {
1905 /*
1906 * XXX We have an active DMA transfer and currently haven't
1907 * sorted out the mechanism for "pending" DMA transfers.
1908 * Return busy.
1909 */
1910 /* Need to add to pending here */
1911 mutex_unlock(&ctrlr->mtx);
1912 return -EBUSY;
1913 } else {
1914 list_add(&list->list, &ctrlr->running);
1915 }
1916
1917 /* Get first bus address and write into registers */
1918 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1919 list);
1920
1921 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1922 &entry->descriptor,
1923 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1924
1925 mutex_unlock(&ctrlr->mtx);
1926
1927 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1928
1929 iowrite32be(bus_addr_high, bridge->base +
1930 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1931 iowrite32be(bus_addr_low, bridge->base +
1932 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1933
1934 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1935 TSI148_LCSR_OFFSET_DCTL);
1936
1937 /* Start the operation */
1938 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1939 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1940
1941 wait_event_interruptible(bridge->dma_queue[channel],
1942 tsi148_dma_busy(ctrlr->parent, channel));
1943
1944 /*
1945  * Read the status register; it remains valid until we kick off a
1946  * new transfer.
1947 */
1948 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1949 TSI148_LCSR_OFFSET_DSTA);
1950
1951 if (val & TSI148_LCSR_DSTA_VBE) {
1952 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1953 retval = -EIO;
1954 }
1955
1956 /* Remove list from running list */
1957 mutex_lock(&ctrlr->mtx);
1958 list_del(&list->list);
1959 mutex_unlock(&ctrlr->mtx);
1960
1961 return retval;
1962 }
1963
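/*
 * Illustrative only: a minimal sketch of how a VME device driver might drive
 * the linked-list machinery above through the VME core API (vme_dma_request,
 * vme_new_dma_list, vme_dma_list_add/exec/free as exported by
 * drivers/vme/vme.c). The vme_dev pointer, buffer address and VME address are
 * hypothetical and error handling is abbreviated; the sketch is not part of
 * the bridge driver itself, hence it is compiled out.
 */
#if 0
static int example_dma_to_vme(struct vme_dev *vdev, dma_addr_t buf_bus,
	unsigned long long vme_addr, size_t count)
{
	struct vme_resource *dma_res;
	struct vme_dma_list *dma_list;
	struct vme_dma_attr *src, *dest;
	int retval;

	dma_res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
	if (!dma_res)
		return -EBUSY;

	dma_list = vme_new_dma_list(dma_res);
	src = vme_dma_pci_attribute(buf_bus);
	dest = vme_dma_vme_attribute(vme_addr, VME_A32, VME_SCT, VME_D32);

	/* Each call appends one tsi148_dma_entry descriptor to the chain */
	retval = vme_dma_list_add(dma_list, src, dest, count);
	if (!retval)
		retval = vme_dma_list_exec(dma_list);

	vme_dma_free_attribute(src);
	vme_dma_free_attribute(dest);
	vme_dma_list_free(dma_list);
	vme_dma_free(dma_res);

	return retval;
}
#endif
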
1964 /*
1965  * Clean up a previously generated linked list
1966  *
1967  * This is kept as a separate function; don't assume the chain can't be reused.
1968 */
1969 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1970 {
1971 struct list_head *pos, *temp;
1972 struct tsi148_dma_entry *entry;
1973
1974 struct vme_bridge *tsi148_bridge = list->parent->parent;
1975
1976 /* detach and free each entry */
1977 list_for_each_safe(pos, temp, &list->entries) {
1978 list_del(pos);
1979 entry = list_entry(pos, struct tsi148_dma_entry, list);
1980
1981 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1982 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1983 kfree(entry);
1984 }
1985
1986 return 0;
1987 }
1988
1989 /*
1990 * All 4 location monitors reside at the same base - this is therefore a
1991 * system wide configuration.
1992 *
1993 * This does not enable the LM monitor - that should be done when the first
1994 * callback is attached and disabled when the last callback is removed.
1995 */
1996 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1997 u32 aspace, u32 cycle)
1998 {
1999 u32 lm_base_high, lm_base_low, lm_ctl = 0;
2000 int i;
2001 struct vme_bridge *tsi148_bridge;
2002 struct tsi148_driver *bridge;
2003
2004 tsi148_bridge = lm->parent;
2005
2006 bridge = tsi148_bridge->driver_priv;
2007
2008 mutex_lock(&lm->mtx);
2009
2010 /* If we already have a callback attached, we can't move it! */
2011 for (i = 0; i < lm->monitors; i++) {
2012 if (bridge->lm_callback[i] != NULL) {
2013 mutex_unlock(&lm->mtx);
2014 dev_err(tsi148_bridge->parent, "Location monitor "
2015 "callback attached, can't reset\n");
2016 return -EBUSY;
2017 }
2018 }
2019
2020 switch (aspace) {
2021 case VME_A16:
2022 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
2023 break;
2024 case VME_A24:
2025 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
2026 break;
2027 case VME_A32:
2028 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
2029 break;
2030 case VME_A64:
2031 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
2032 break;
2033 default:
2034 mutex_unlock(&lm->mtx);
2035 dev_err(tsi148_bridge->parent, "Invalid address space\n");
2036 return -EINVAL;
2037 break;
2038 }
2039
2040 if (cycle & VME_SUPER)
2041 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
2042 if (cycle & VME_USER)
2043 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
2044 if (cycle & VME_PROG)
2045 lm_ctl |= TSI148_LCSR_LMAT_PGM;
2046 if (cycle & VME_DATA)
2047 lm_ctl |= TSI148_LCSR_LMAT_DATA;
2048
2049 reg_split(lm_base, &lm_base_high, &lm_base_low);
2050
2051 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
2052 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
2053 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2054
2055 mutex_unlock(&lm->mtx);
2056
2057 return 0;
2058 }
2059
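/*
 * Illustrative only: a minimal sketch of how a consumer might configure a
 * location monitor through the VME core wrappers of this kernel generation
 * (vme_lm_request, vme_lm_set and vme_lm_attach in drivers/vme/vme.c). The
 * vme_dev pointer, base address and callback are hypothetical; the sketch is
 * compiled out and not part of the bridge driver.
 */
#if 0
static void example_lm_callback(int monitor)
{
	pr_info("location monitor %d triggered\n", monitor);
}

static int example_lm_setup(struct vme_dev *vdev, struct vme_resource **res)
{
	int retval;

	*res = vme_lm_request(vdev);
	if (!*res)
		return -EBUSY;

	/* Configure the shared base first, then attach to monitor 0 */
	retval = vme_lm_set(*res, 0x6000, VME_A16, VME_USER | VME_DATA);
	if (!retval)
		retval = vme_lm_attach(*res, 0, example_lm_callback);

	return retval;
}
#endif
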
2060 /* Get the configuration of the location monitor and return whether it is
2061  * enabled or disabled.
2062 */
2063 static int tsi148_lm_get(struct vme_lm_resource *lm,
2064 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
2065 {
2066 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2067 struct tsi148_driver *bridge;
2068
2069 bridge = lm->parent->driver_priv;
2070
2071 mutex_lock(&lm->mtx);
2072
2073 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2074 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2075 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2076
2077 reg_join(lm_base_high, lm_base_low, lm_base);
2078
2079 if (lm_ctl & TSI148_LCSR_LMAT_EN)
2080 enabled = 1;
2081
2082 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2083 *aspace |= VME_A16;
2084
2085 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2086 *aspace |= VME_A24;
2087
2088 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2089 *aspace |= VME_A32;
2090
2091 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2092 *aspace |= VME_A64;
2093 
2095 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2096 *cycle |= VME_SUPER;
2097 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2098 *cycle |= VME_USER;
2099 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2100 *cycle |= VME_PROG;
2101 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2102 *cycle |= VME_DATA;
2103
2104 mutex_unlock(&lm->mtx);
2105
2106 return enabled;
2107 }
2108
2109 /*
2110 * Attach a callback to a specific location monitor.
2111 *
2112  * The callback will be passed the index of the monitor that triggered.
2113 */
2114 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2115 void (*callback)(int))
2116 {
2117 u32 lm_ctl, tmp;
2118 struct vme_bridge *tsi148_bridge;
2119 struct tsi148_driver *bridge;
2120
2121 tsi148_bridge = lm->parent;
2122
2123 bridge = tsi148_bridge->driver_priv;
2124
2125 mutex_lock(&lm->mtx);
2126
2127 /* Ensure that the location monitor is configured - need PGM or DATA */
2128 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2129 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2130 mutex_unlock(&lm->mtx);
2131 dev_err(tsi148_bridge->parent, "Location monitor not properly "
2132 "configured\n");
2133 return -EINVAL;
2134 }
2135
2136 /* Check that a callback isn't already attached */
2137 if (bridge->lm_callback[monitor] != NULL) {
2138 mutex_unlock(&lm->mtx);
2139 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2140 return -EBUSY;
2141 }
2142
2143 /* Attach callback */
2144 bridge->lm_callback[monitor] = callback;
2145
2146 /* Enable Location Monitor interrupt */
2147 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2148 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2149 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2150
2151 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2152 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2153 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2154
2155 /* Ensure that the global Location Monitor Enable bit is set */
2156 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2157 lm_ctl |= TSI148_LCSR_LMAT_EN;
2158 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2159 }
2160
2161 mutex_unlock(&lm->mtx);
2162
2163 return 0;
2164 }
2165
2166 /*
2167  * Detach a callback function from a specific location monitor.
2168 */
2169 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2170 {
2171 u32 lm_en, tmp;
2172 struct tsi148_driver *bridge;
2173
2174 bridge = lm->parent->driver_priv;
2175
2176 mutex_lock(&lm->mtx);
2177
2178 /* Disable Location Monitor and ensure previous interrupts are clear */
2179 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2180 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2181 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2182
2183 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2184 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2185 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2186
2187 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2188 bridge->base + TSI148_LCSR_INTC);
2189
2190 /* Detach callback */
2191 bridge->lm_callback[monitor] = NULL;
2192
2193 /* If all location monitors are disabled, disable the global Location Monitor */
2194 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2195 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2196 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2197 tmp &= ~TSI148_LCSR_LMAT_EN;
2198 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2199 }
2200
2201 mutex_unlock(&lm->mtx);
2202
2203 return 0;
2204 }
2205
2206 /*
2207 * Determine Geographical Addressing
2208 */
2209 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2210 {
2211 u32 slot = 0;
2212 struct tsi148_driver *bridge;
2213
2214 bridge = tsi148_bridge->driver_priv;
2215
2216 if (!geoid) {
2217 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2218 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2219 } else
2220 slot = geoid;
2221
2222 return (int)slot;
2223 }
2224
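/*
 * Illustrative only: VME device drivers normally obtain their slot through
 * the core wrapper (assumed here to be vme_slot_get() in this kernel
 * generation), which reaches tsi148_slot_get() above via the slot_get hook.
 * The vme_dev pointer is hypothetical and the sketch is compiled out.
 */
#if 0
static void example_report_slot(struct vme_dev *vdev)
{
	int slot = vme_slot_get(vdev);

	if (slot > 0)
		dev_info(&vdev->dev, "sitting in VME slot %d\n", slot);
}
#endif
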
2225 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2226 dma_addr_t *dma)
2227 {
2228 struct pci_dev *pdev;
2229
2230 /* Find pci_dev container of dev */
2231 pdev = container_of(parent, struct pci_dev, dev);
2232
2233 return pci_alloc_consistent(pdev, size, dma);
2234 }
2235
2236 static void tsi148_free_consistent(struct device *parent, size_t size,
2237 void *vaddr, dma_addr_t dma)
2238 {
2239 struct pci_dev *pdev;
2240
2241 /* Find pci_dev container of dev */
2242 pdev = container_of(parent, struct pci_dev, dev);
2243
2244 pci_free_consistent(pdev, size, vaddr, dma);
2245 }
2246
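/*
 * Illustrative only: consumers reach the two hooks above through the VME
 * core's vme_alloc_consistent()/vme_free_consistent() wrappers (names assumed
 * from the core API of this kernel generation). The resource pointer and
 * buffer size are hypothetical; the sketch is compiled out.
 */
#if 0
static void *example_alloc_buffer(struct vme_resource *res, dma_addr_t *bus)
{
	/* 64kB coherent buffer, e.g. to back a slave window */
	return vme_alloc_consistent(res, 0x10000, bus);
}
#endif
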
2247 static int __init tsi148_init(void)
2248 {
2249 return pci_register_driver(&tsi148_driver);
2250 }
2251
2252 /*
2253 * Configure CR/CSR space
2254 *
2255 * Access to the CR/CSR can be configured at power-up. The location of the
2256  * CR/CSR registers in the CR/CSR address space is determined by the board's
2257  * Auto-ID or geographic address. This function ensures that the window is
2258  * enabled at an offset consistent with the board's geographic address.
2259  *
2260  * Each board has a 512kB window, with the highest 4kB being used for the
2261  * board's registers; this means there is a fixed-length 508kB window which
2262  * must be mapped onto PCI memory.
2263 */
2264 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2265 struct pci_dev *pdev)
2266 {
2267 u32 cbar, crat, vstat;
2268 u32 crcsr_bus_high, crcsr_bus_low;
2269 int retval;
2270 struct tsi148_driver *bridge;
2271
2272 bridge = tsi148_bridge->driver_priv;
2273
2274 /* Allocate mem for CR/CSR image */
2275 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2276 &bridge->crcsr_bus);
2277 if (bridge->crcsr_kernel == NULL) {
2278 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2279 "CR/CSR image\n");
2280 return -ENOMEM;
2281 }
2282
2283 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2284
2285 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2286
2287 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2288 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2289
2290 /* Ensure that the CR/CSR is configured at the correct offset */
2291 cbar = ioread32be(bridge->base + TSI148_CBAR);
2292 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2293
2294 vstat = tsi148_slot_get(tsi148_bridge);
2295
2296 if (cbar != vstat) {
2297 cbar = vstat;
2298 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2299 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2300 }
2301 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2302
2303 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2304 if (!(crat & TSI148_LCSR_CRAT_EN)) {
2305 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2306 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2307 bridge->base + TSI148_LCSR_CRAT);
2308 } else
2309 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2310
2311 /* If we want flushed, error-checked writes, set up a window
2312 * over the CR/CSR registers. We read from here to safely flush
2313 * through VME writes.
2314 */
2315 if (err_chk) {
2316 retval = tsi148_master_set(bridge->flush_image, 1,
2317 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2318 VME_D16);
2319 if (retval)
2320 dev_err(tsi148_bridge->parent, "Configuring flush image"
2321 " failed\n");
2322 }
2323
2324 return 0;
2325
2326 }
2327
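/*
 * Worked example of the layout handled by tsi148_crcsr_init() (illustrative
 * only): a board at geographic address 5 owns the 512kB CR/CSR window
 * starting at 5 * 0x80000 = 0x280000, so CBAR is programmed with 5 << 3 and
 * the flush image (when err_chk is set) maps that same 0x80000-byte window.
 * The top 4kB of the window (0x2FF000-0x2FFFFF) holds the board's own
 * registers, leaving the 508kB below it backed by the crcsr_kernel buffer.
 */
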
2328 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2329 struct pci_dev *pdev)
2330 {
2331 u32 crat;
2332 struct tsi148_driver *bridge;
2333
2334 bridge = tsi148_bridge->driver_priv;
2335
2336 /* Turn off CR/CSR space */
2337 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2338 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2339 bridge->base + TSI148_LCSR_CRAT);
2340
2341 /* Free image */
2342 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2343 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2344
2345 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2346 bridge->crcsr_bus);
2347 }
2348
2349 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2350 {
2351 int retval, i, master_num;
2352 u32 data;
2353 struct list_head *pos = NULL;
2354 struct vme_bridge *tsi148_bridge;
2355 struct tsi148_driver *tsi148_device;
2356 struct vme_master_resource *master_image;
2357 struct vme_slave_resource *slave_image;
2358 struct vme_dma_resource *dma_ctrlr;
2359 struct vme_lm_resource *lm;
2360
2361 /* If we want to support more than one of each bridge, we need to
2362 * dynamically generate this so we get one per device
2363 */
2364 tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2365 if (tsi148_bridge == NULL) {
2366 dev_err(&pdev->dev, "Failed to allocate memory for device "
2367 "structure\n");
2368 retval = -ENOMEM;
2369 goto err_struct;
2370 }
2371
2372 tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2373 if (tsi148_device == NULL) {
2374 dev_err(&pdev->dev, "Failed to allocate memory for device "
2375 "structure\n");
2376 retval = -ENOMEM;
2377 goto err_driver;
2378 }
2379
2380 tsi148_bridge->driver_priv = tsi148_device;
2381
2382 /* Enable the device */
2383 retval = pci_enable_device(pdev);
2384 if (retval) {
2385 dev_err(&pdev->dev, "Unable to enable device\n");
2386 goto err_enable;
2387 }
2388
2389 /* Map Registers */
2390 retval = pci_request_regions(pdev, driver_name);
2391 if (retval) {
2392 dev_err(&pdev->dev, "Unable to reserve resources\n");
2393 goto err_resource;
2394 }
2395
2396 /* map registers in BAR 0 */
2397 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2398 4096);
2399 if (!tsi148_device->base) {
2400 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2401 retval = -EIO;
2402 goto err_remap;
2403 }
2404
2405 /* Check to see if the mapping worked out */
2406 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2407 if (data != PCI_VENDOR_ID_TUNDRA) {
2408 dev_err(&pdev->dev, "CRG region check failed\n");
2409 retval = -EIO;
2410 goto err_test;
2411 }
2412
2413 /* Initialize wait queues & mutual exclusion flags */
2414 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2415 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2416 init_waitqueue_head(&tsi148_device->iack_queue);
2417 mutex_init(&tsi148_device->vme_int);
2418 mutex_init(&tsi148_device->vme_rmw);
2419
2420 tsi148_bridge->parent = &pdev->dev;
2421 strcpy(tsi148_bridge->name, driver_name);
2422
2423 /* Setup IRQ */
2424 retval = tsi148_irq_init(tsi148_bridge);
2425 if (retval != 0) {
2426 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2427 goto err_irq;
2428 }
2429
2430 /* If we are going to flush writes, we need to read from the VME bus.
2431  * We need to do this safely, thus we read the device's own CR/CSR
2432 * register. To do this we must set up a window in CR/CSR space and
2433 * hence have one less master window resource available.
2434 */
2435 master_num = TSI148_MAX_MASTER;
2436 if (err_chk) {
2437 master_num--;
2438
2439 tsi148_device->flush_image =
2440 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2441 if (tsi148_device->flush_image == NULL) {
2442 dev_err(&pdev->dev, "Failed to allocate memory for "
2443 "flush resource structure\n");
2444 retval = -ENOMEM;
2445 goto err_master;
2446 }
2447 tsi148_device->flush_image->parent = tsi148_bridge;
2448 spin_lock_init(&tsi148_device->flush_image->lock);
2449 tsi148_device->flush_image->locked = 1;
2450 tsi148_device->flush_image->number = master_num;
2451 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2452 VME_A32 | VME_A64;
2453 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2454 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2455 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2456 VME_USER | VME_PROG | VME_DATA;
2457 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2458 memset(&tsi148_device->flush_image->bus_resource, 0,
2459 sizeof(struct resource));
2460 tsi148_device->flush_image->kern_base = NULL;
2461 }
2462
2463 /* Add master windows to list */
2464 INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2465 for (i = 0; i < master_num; i++) {
2466 master_image = kmalloc(sizeof(struct vme_master_resource),
2467 GFP_KERNEL);
2468 if (master_image == NULL) {
2469 dev_err(&pdev->dev, "Failed to allocate memory for "
2470 "master resource structure\n");
2471 retval = -ENOMEM;
2472 goto err_master;
2473 }
2474 master_image->parent = tsi148_bridge;
2475 spin_lock_init(&master_image->lock);
2476 master_image->locked = 0;
2477 master_image->number = i;
2478 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2479 VME_A64;
2480 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2481 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2482 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2483 VME_PROG | VME_DATA;
2484 master_image->width_attr = VME_D16 | VME_D32;
2485 memset(&master_image->bus_resource, 0,
2486 sizeof(struct resource));
2487 master_image->kern_base = NULL;
2488 list_add_tail(&master_image->list,
2489 &tsi148_bridge->master_resources);
2490 }
2491
2492 /* Add slave windows to list */
2493 INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2494 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2495 slave_image = kmalloc(sizeof(struct vme_slave_resource),
2496 GFP_KERNEL);
2497 if (slave_image == NULL) {
2498 dev_err(&pdev->dev, "Failed to allocate memory for "
2499 "slave resource structure\n");
2500 retval = -ENOMEM;
2501 goto err_slave;
2502 }
2503 slave_image->parent = tsi148_bridge;
2504 mutex_init(&slave_image->mtx);
2505 slave_image->locked = 0;
2506 slave_image->number = i;
2507 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2508 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2509 VME_USER3 | VME_USER4;
2510 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2511 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2512 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2513 VME_PROG | VME_DATA;
2514 list_add_tail(&slave_image->list,
2515 &tsi148_bridge->slave_resources);
2516 }
2517
2518 /* Add dma engines to list */
2519 INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2520 for (i = 0; i < TSI148_MAX_DMA; i++) {
2521 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2522 GFP_KERNEL);
2523 if (dma_ctrlr == NULL) {
2524 dev_err(&pdev->dev, "Failed to allocate memory for "
2525 "dma resource structure\n");
2526 retval = -ENOMEM;
2527 goto err_dma;
2528 }
2529 dma_ctrlr->parent = tsi148_bridge;
2530 mutex_init(&dma_ctrlr->mtx);
2531 dma_ctrlr->locked = 0;
2532 dma_ctrlr->number = i;
2533 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2534 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2535 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2536 VME_DMA_PATTERN_TO_MEM;
2537 INIT_LIST_HEAD(&dma_ctrlr->pending);
2538 INIT_LIST_HEAD(&dma_ctrlr->running);
2539 list_add_tail(&dma_ctrlr->list,
2540 &tsi148_bridge->dma_resources);
2541 }
2542
2543 /* Add location monitor to list */
2544 INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2545 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2546 if (lm == NULL) {
2547 dev_err(&pdev->dev, "Failed to allocate memory for "
2548 "location monitor resource structure\n");
2549 retval = -ENOMEM;
2550 goto err_lm;
2551 }
2552 lm->parent = tsi148_bridge;
2553 mutex_init(&lm->mtx);
2554 lm->locked = 0;
2555 lm->number = 1;
2556 lm->monitors = 4;
2557 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2558
2559 tsi148_bridge->slave_get = tsi148_slave_get;
2560 tsi148_bridge->slave_set = tsi148_slave_set;
2561 tsi148_bridge->master_get = tsi148_master_get;
2562 tsi148_bridge->master_set = tsi148_master_set;
2563 tsi148_bridge->master_read = tsi148_master_read;
2564 tsi148_bridge->master_write = tsi148_master_write;
2565 tsi148_bridge->master_rmw = tsi148_master_rmw;
2566 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2567 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2568 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2569 tsi148_bridge->irq_set = tsi148_irq_set;
2570 tsi148_bridge->irq_generate = tsi148_irq_generate;
2571 tsi148_bridge->lm_set = tsi148_lm_set;
2572 tsi148_bridge->lm_get = tsi148_lm_get;
2573 tsi148_bridge->lm_attach = tsi148_lm_attach;
2574 tsi148_bridge->lm_detach = tsi148_lm_detach;
2575 tsi148_bridge->slot_get = tsi148_slot_get;
2576 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2577 tsi148_bridge->free_consistent = tsi148_free_consistent;
2578
2579 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2580 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2581 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2582 if (!geoid)
2583 dev_info(&pdev->dev, "VME geographical address is %d\n",
2584 data & TSI148_LCSR_VSTAT_GA_M);
2585 else
2586 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2587 geoid);
2588
2589 dev_info(&pdev->dev, "VME write flushing and error checking is %s\n",
2590 err_chk ? "enabled" : "disabled");
2591
2592 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2593 if (retval) {
2594 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2595 goto err_crcsr;
2596 }
2596
2597 retval = vme_register_bridge(tsi148_bridge);
2598 if (retval != 0) {
2599 dev_err(&pdev->dev, "Chip Registration failed.\n");
2600 goto err_reg;
2601 }
2602
2603 pci_set_drvdata(pdev, tsi148_bridge);
2604
2605 /* Clear VME bus "board fail", and "power-up reset" lines */
2606 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2607 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2608 data |= TSI148_LCSR_VSTAT_CPURST;
2609 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2610
2611 return 0;
2612
2613 err_reg:
2614 tsi148_crcsr_exit(tsi148_bridge, pdev);
2615 err_crcsr:
2616 err_lm:
2617 /* resources are stored in a linked list */
2618 list_for_each(pos, &tsi148_bridge->lm_resources) {
2619 lm = list_entry(pos, struct vme_lm_resource, list);
2620 list_del(pos);
2621 kfree(lm);
2622 }
2623 err_dma:
2624 /* resources are stored in a linked list */
2625 list_for_each(pos, &tsi148_bridge->dma_resources) {
2626 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2627 list_del(pos);
2628 kfree(dma_ctrlr);
2629 }
2630 err_slave:
2631 /* resources are stored in a linked list */
2632 list_for_each(pos, &tsi148_bridge->slave_resources) {
2633 slave_image = list_entry(pos, struct vme_slave_resource, list);
2634 list_del(pos);
2635 kfree(slave_image);
2636 }
2637 err_master:
2638 /* resources are stored in a linked list */
2639 list_for_each(pos, &tsi148_bridge->master_resources) {
2640 master_image = list_entry(pos, struct vme_master_resource,
2641 list);
2642 list_del(pos);
2643 kfree(master_image);
2644 }
2645
2646 tsi148_irq_exit(tsi148_bridge, pdev);
2647 err_irq:
2648 err_test:
2649 iounmap(tsi148_device->base);
2650 err_remap:
2651 pci_release_regions(pdev);
2652 err_resource:
2653 pci_disable_device(pdev);
2654 err_enable:
2655 kfree(tsi148_device);
2656 err_driver:
2657 kfree(tsi148_bridge);
2658 err_struct:
2659 return retval;
2660
2661 }
2662
2663 static void tsi148_remove(struct pci_dev *pdev)
2664 {
2665 struct list_head *pos = NULL;
2666 struct list_head *tmplist;
2667 struct vme_master_resource *master_image;
2668 struct vme_slave_resource *slave_image;
2669 struct vme_dma_resource *dma_ctrlr;
2670 int i;
2671 struct tsi148_driver *bridge;
2672 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2673
2674 bridge = tsi148_bridge->driver_priv;
2675
2676
2677 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2678
2679 /*
2680 * Shutdown all inbound and outbound windows.
2681 */
2682 for (i = 0; i < 8; i++) {
2683 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2684 TSI148_LCSR_OFFSET_ITAT);
2685 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2686 TSI148_LCSR_OFFSET_OTAT);
2687 }
2688
2689 /*
2690 * Shutdown Location monitor.
2691 */
2692 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2693
2694 /*
2695 * Shutdown CRG map.
2696 */
2697 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2698
2699 /*
2700 * Clear error status.
2701 */
2702 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2703 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2704 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2705
2706 /*
2707 * Remove VIRQ interrupt (if any)
2708 */
2709 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2710 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2711
2712 /*
2713 * Map all Interrupts to PCI INTA
2714 */
2715 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2716 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2717
2718 tsi148_irq_exit(tsi148_bridge, pdev);
2719
2720 vme_unregister_bridge(tsi148_bridge);
2721
2722 tsi148_crcsr_exit(tsi148_bridge, pdev);
2723
2724 /* resources are stored in a linked list */
2725 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2726 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2727 list_del(pos);
2728 kfree(dma_ctrlr);
2729 }
2730
2731 /* resources are stored in a linked list */
2732 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2733 slave_image = list_entry(pos, struct vme_slave_resource, list);
2734 list_del(pos);
2735 kfree(slave_image);
2736 }
2737
2738 /* resources are stored in a linked list */
2739 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2740 master_image = list_entry(pos, struct vme_master_resource,
2741 list);
2742 list_del(pos);
2743 kfree(master_image);
2744 }
2745
2746 iounmap(bridge->base);
2747
2748 pci_release_regions(pdev);
2749
2750 pci_disable_device(pdev);
2751
2752 kfree(tsi148_bridge->driver_priv);
2753
2754 kfree(tsi148_bridge);
2755 }
2756
2757 static void __exit tsi148_exit(void)
2758 {
2759 pci_unregister_driver(&tsi148_driver);
2760 }
2761
2762 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2763 module_param(err_chk, bool, 0);
2764
2765 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2766 module_param(geoid, int, 0);
2767
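/*
 * Example usage (illustrative): both parameters are given at load time, e.g.
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=3
 *
 * err_chk enables the flush/error-check window set up in tsi148_crcsr_init()
 * and geoid overrides the geographical address otherwise read from VSTAT.
 */
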
2768 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2769 MODULE_LICENSE("GPL");
2770
2771 module_init(tsi148_init);
2772 module_exit(tsi148_exit);