drivers/vme/bridges/vme_tsi148.c
1 /*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32 #include <linux/byteorder/generic.h>
33 #include <linux/vme.h>
34
35 #include "../vme_bridge.h"
36 #include "vme_tsi148.h"
37
38 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
39 static void tsi148_remove(struct pci_dev *);
40
41
42 /* Module parameter */
43 static bool err_chk;
44 static int geoid;
45
46 static const char driver_name[] = "vme_tsi148";
47
48 static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
49 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
50 { },
51 };
52
53 static struct pci_driver tsi148_driver = {
54 .name = driver_name,
55 .id_table = tsi148_ids,
56 .probe = tsi148_probe,
57 .remove = tsi148_remove,
58 };
59
60 static void reg_join(unsigned int high, unsigned int low,
61 unsigned long long *variable)
62 {
63 *variable = (unsigned long long)high << 32;
64 *variable |= (unsigned long long)low;
65 }
66
67 static void reg_split(unsigned long long variable, unsigned int *high,
68 unsigned int *low)
69 {
70 *low = (unsigned int)variable & 0xFFFFFFFF;
71 *high = (unsigned int)(variable >> 32);
72 }
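/*
 * Illustrative sketch (not part of the driver logic): reg_join() and
 * reg_split() simply pack and unpack the 64-bit values that the TSI148
 * exposes as pairs of 32-bit registers. Assuming a VME address of
 * 0x0000000112340000:
 *
 *	unsigned long long addr = 0x0000000112340000ULL;
 *	unsigned int high, low;
 *
 *	reg_split(addr, &high, &low);	// high = 0x00000001, low = 0x12340000
 *	reg_join(high, low, &addr);	// addr = 0x0000000112340000 again
 */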
73
74 /*
75 * Wakes up DMA queue.
76 */
77 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
78 int channel_mask)
79 {
80 u32 serviced = 0;
81
82 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
83 wake_up(&bridge->dma_queue[0]);
84 serviced |= TSI148_LCSR_INTC_DMA0C;
85 }
86 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
87 wake_up(&bridge->dma_queue[1]);
88 serviced |= TSI148_LCSR_INTC_DMA1C;
89 }
90
91 return serviced;
92 }
93
94 /*
95 * Wake up location monitor queue
96 */
97 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
98 {
99 int i;
100 u32 serviced = 0;
101
102 for (i = 0; i < 4; i++) {
103 if (stat & TSI148_LCSR_INTS_LMS[i]) {
104 /* We only enable interrupts if the callback is set */
105 bridge->lm_callback[i](i);
106 serviced |= TSI148_LCSR_INTC_LMC[i];
107 }
108 }
109
110 return serviced;
111 }
112
113 /*
114 * Wake up mailbox queue.
115 *
116 * XXX This functionality is not exposed up through the API.
117 */
118 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
119 {
120 int i;
121 u32 val;
122 u32 serviced = 0;
123 struct tsi148_driver *bridge;
124
125 bridge = tsi148_bridge->driver_priv;
126
127 for (i = 0; i < 4; i++) {
128 if (stat & TSI148_LCSR_INTS_MBS[i]) {
129 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
130 dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
131 ": 0x%x\n", i, val);
132 serviced |= TSI148_LCSR_INTC_MBC[i];
133 }
134 }
135
136 return serviced;
137 }
138
139 /*
140 * Display error & status message when PERR (PCI) exception interrupt occurs.
141 */
142 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
143 {
144 struct tsi148_driver *bridge;
145
146 bridge = tsi148_bridge->driver_priv;
147
148 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
149 "attributes: %08x\n",
150 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
151 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
152 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
153
154 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
155 "completion reg: %08x\n",
156 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
157 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
158
159 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
160
161 return TSI148_LCSR_INTC_PERRC;
162 }
163
164 /*
165 * Save address and status when VME error interrupt occurs.
166 */
167 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
168 {
169 unsigned int error_addr_high, error_addr_low;
170 unsigned long long error_addr;
171 u32 error_attrib;
172 struct vme_bus_error *error;
173 struct tsi148_driver *bridge;
174
175 bridge = tsi148_bridge->driver_priv;
176
177 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
178 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
179 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
180
181 reg_join(error_addr_high, error_addr_low, &error_addr);
182
183 /* Check for exception register overflow (we have lost error data) */
184 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
185 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
186 "Occurred\n");
187 }
188
189 error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
190 if (error) {
191 error->address = error_addr;
192 error->attributes = error_attrib;
193 list_add_tail(&error->list, &tsi148_bridge->vme_errors);
194 } else {
195 dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
196 "VMEbus Error reporting\n");
197 dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
198 "0x%llx, attributes: %08x\n", error_addr, error_attrib);
199 }
200
201 /* Clear Status */
202 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
203
204 return TSI148_LCSR_INTC_VERRC;
205 }
206
207 /*
208 * Wake up IACK queue.
209 */
210 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
211 {
212 wake_up(&bridge->iack_queue);
213
214 return TSI148_LCSR_INTC_IACKC;
215 }
216
217 /*
218 * Call the VME bus interrupt callbacks if provided.
219 */
220 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
221 u32 stat)
222 {
223 int vec, i, serviced = 0;
224 struct tsi148_driver *bridge;
225
226 bridge = tsi148_bridge->driver_priv;
227
228 for (i = 7; i > 0; i--) {
229 if (stat & (1 << i)) {
230 /*
231 * Note: Even though the registers are defined as
232 * 32-bits in the spec, we only want to issue 8-bit
233 * IACK cycles on the bus, read from offset 3.
234 */
235 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
236
237 vme_irq_handler(tsi148_bridge, i, vec);
238
239 serviced |= (1 << i);
240 }
241 }
242
243 return serviced;
244 }
245
246 /*
247 * Top level interrupt handler. Clears appropriate interrupt status bits and
248 * then calls appropriate sub handler(s).
249 */
250 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
251 {
252 u32 stat, enable, serviced = 0;
253 struct vme_bridge *tsi148_bridge;
254 struct tsi148_driver *bridge;
255
256 tsi148_bridge = ptr;
257
258 bridge = tsi148_bridge->driver_priv;
259
260 /* Determine which interrupts are unmasked and set */
261 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
262 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
263
264 /* Only look at unmasked interrupts */
265 stat &= enable;
266
267 if (unlikely(!stat))
268 return IRQ_NONE;
269
270 /* Call subhandlers as appropriate */
271 /* DMA irqs */
272 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
273 serviced |= tsi148_DMA_irqhandler(bridge, stat);
274
275 /* Location monitor irqs */
276 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
277 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
278 serviced |= tsi148_LM_irqhandler(bridge, stat);
279
280 /* Mail box irqs */
281 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
282 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
283 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
284
285 /* PCI bus error */
286 if (stat & TSI148_LCSR_INTS_PERRS)
287 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
288
289 /* VME bus error */
290 if (stat & TSI148_LCSR_INTS_VERRS)
291 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
292
293 /* IACK irq */
294 if (stat & TSI148_LCSR_INTS_IACKS)
295 serviced |= tsi148_IACK_irqhandler(bridge);
296
297 /* VME bus irqs */
298 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
299 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
300 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
301 TSI148_LCSR_INTS_IRQ1S))
302 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
303
304 /* Clear serviced interrupts */
305 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
306
307 return IRQ_HANDLED;
308 }
309
310 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
311 {
312 int result;
313 unsigned int tmp;
314 struct pci_dev *pdev;
315 struct tsi148_driver *bridge;
316
317 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
318
319 bridge = tsi148_bridge->driver_priv;
320
321 /* Initialise list for VME bus errors */
322 INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
323
324 mutex_init(&tsi148_bridge->irq_mtx);
325
326 result = request_irq(pdev->irq,
327 tsi148_irqhandler,
328 IRQF_SHARED,
329 driver_name, tsi148_bridge);
330 if (result) {
331 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
332 "vector %02X\n", pdev->irq);
333 return result;
334 }
335
336 /* Enable and unmask interrupts */
337 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
338 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
339 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
340 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
341 TSI148_LCSR_INTEO_IACKEO;
342
343 /* This leaves the following interrupts masked.
344 * TSI148_LCSR_INTEO_VIEEO
345 * TSI148_LCSR_INTEO_SYSFLEO
346 * TSI148_LCSR_INTEO_ACFLEO
347 */
348
349 /* Don't enable Location Monitor interrupts here - they will be
350 * enabled when the location monitors are properly configured and
351 * a callback has been attached.
352 * TSI148_LCSR_INTEO_LM0EO
353 * TSI148_LCSR_INTEO_LM1EO
354 * TSI148_LCSR_INTEO_LM2EO
355 * TSI148_LCSR_INTEO_LM3EO
356 */
357
358 /* Don't enable VME interrupts until we add a handler, else the board
359 * will respond to it and we don't want that unless it knows how to
360 * properly deal with it.
361 * TSI148_LCSR_INTEO_IRQ7EO
362 * TSI148_LCSR_INTEO_IRQ6EO
363 * TSI148_LCSR_INTEO_IRQ5EO
364 * TSI148_LCSR_INTEO_IRQ4EO
365 * TSI148_LCSR_INTEO_IRQ3EO
366 * TSI148_LCSR_INTEO_IRQ2EO
367 * TSI148_LCSR_INTEO_IRQ1EO
368 */
369
370 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
371 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
372
373 return 0;
374 }
375
376 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
377 struct pci_dev *pdev)
378 {
379 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
380
381 /* Turn off interrupts */
382 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
383 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
384
385 /* Clear all interrupts */
386 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
387
388 /* Detach interrupt handler */
389 free_irq(pdev->irq, tsi148_bridge);
390 }
391
392 /*
393 * Check to see if an IACK has been received, return true (1) or false (0).
394 */
395 static int tsi148_iack_received(struct tsi148_driver *bridge)
396 {
397 u32 tmp;
398
399 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
400
401 if (tmp & TSI148_LCSR_VICR_IRQS)
402 return 0;
403 else
404 return 1;
405 }
406
407 /*
408 * Configure VME interrupt
409 */
410 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
411 int state, int sync)
412 {
413 struct pci_dev *pdev;
414 u32 tmp;
415 struct tsi148_driver *bridge;
416
417 bridge = tsi148_bridge->driver_priv;
418
419 /* We need to do the ordering differently for enabling and disabling */
420 if (state == 0) {
421 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
422 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
423 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
424
425 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
426 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
427 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
428
429 if (sync != 0) {
430 pdev = container_of(tsi148_bridge->parent,
431 struct pci_dev, dev);
432
433 synchronize_irq(pdev->irq);
434 }
435 } else {
436 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
437 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
438 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
439
440 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
441 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
442 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
443 }
444 }
445
446 /*
447 * Generate a VME bus interrupt at the requested level & vector. Wait for
448 * interrupt to be acked.
449 */
450 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
451 int statid)
452 {
453 u32 tmp;
454 struct tsi148_driver *bridge;
455
456 bridge = tsi148_bridge->driver_priv;
457
458 mutex_lock(&bridge->vme_int);
459
460 /* Read VICR register */
461 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
462
463 /* Set Status/ID */
464 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
465 (statid & TSI148_LCSR_VICR_STID_M);
466 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
467
468 /* Assert VMEbus IRQ */
469 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
470 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
471
472 /* XXX Consider implementing a timeout? */
473 wait_event_interruptible(bridge->iack_queue,
474 tsi148_iack_received(bridge));
475
476 mutex_unlock(&bridge->vme_int);
477
478 return 0;
479 }
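/*
 * Rough usage sketch (assumes the vme_irq_generate() helper exported by the
 * VME core, per the in-tree VME API documentation; "vdev" is a client-side
 * placeholder): client drivers do not call tsi148_irq_generate() directly,
 * the core dispatches to it through the bridge's irq_generate method, e.g.:
 *
 *	// Assert a level 3 VMEbus interrupt with status/ID 0xAA and wait
 *	// for the IACK cycle to complete.
 *	if (vme_irq_generate(vdev, 3, 0xAA))
 *		dev_err(&vdev->dev, "failed to generate VME interrupt\n");
 */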
480
481 /*
482 * Find the first error in this address range
483 */
484 static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
485 u32 aspace, unsigned long long address, size_t count)
486 {
487 struct list_head *err_pos;
488 struct vme_bus_error *vme_err, *valid = NULL;
489 unsigned long long bound;
490
491 bound = address + count;
492
493 /*
494 * XXX We are currently not looking at the address space when parsing
495 * for errors. This is because parsing the Address Modifier Codes
496 * is going to be quite resource intensive to do properly. We
497 * should be OK just looking at the addresses and this is certainly
498 * much better than what we had before.
499 */
500 err_pos = NULL;
501 /* Iterate through errors */
502 list_for_each(err_pos, &tsi148_bridge->vme_errors) {
503 vme_err = list_entry(err_pos, struct vme_bus_error, list);
504 if ((vme_err->address >= address) &&
505 (vme_err->address < bound)) {
506
507 valid = vme_err;
508 break;
509 }
510 }
511
512 return valid;
513 }
514
515 /*
516 * Clear errors in the provided address range.
517 */
518 static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
519 u32 aspace, unsigned long long address, size_t count)
520 {
521 struct list_head *err_pos, *temp;
522 struct vme_bus_error *vme_err;
523 unsigned long long bound;
524
525 bound = address + count;
526
527 /*
528 * XXX We are currently not looking at the address space when parsing
529 * for errors. This is because parsing the Address Modifier Codes
530 * is going to be quite resource intensive to do properly. We
531 * should be OK just looking at the addresses and this is certainly
532 * much better than what we had before.
533 */
534 err_pos = NULL;
535 /* Iterate through errors */
536 list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
537 vme_err = list_entry(err_pos, struct vme_bus_error, list);
538
539 if ((vme_err->address >= address) &&
540 (vme_err->address < bound)) {
541
542 list_del(err_pos);
543 kfree(vme_err);
544 }
545 }
546 }
547
548 /*
549 * Initialize a slave window with the requested attributes.
550 */
551 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
552 unsigned long long vme_base, unsigned long long size,
553 dma_addr_t pci_base, u32 aspace, u32 cycle)
554 {
555 unsigned int i, addr = 0, granularity = 0;
556 unsigned int temp_ctl = 0;
557 unsigned int vme_base_low, vme_base_high;
558 unsigned int vme_bound_low, vme_bound_high;
559 unsigned int pci_offset_low, pci_offset_high;
560 unsigned long long vme_bound, pci_offset;
561 struct vme_bridge *tsi148_bridge;
562 struct tsi148_driver *bridge;
563
564 tsi148_bridge = image->parent;
565 bridge = tsi148_bridge->driver_priv;
566
567 i = image->number;
568
569 switch (aspace) {
570 case VME_A16:
571 granularity = 0x10;
572 addr |= TSI148_LCSR_ITAT_AS_A16;
573 break;
574 case VME_A24:
575 granularity = 0x1000;
576 addr |= TSI148_LCSR_ITAT_AS_A24;
577 break;
578 case VME_A32:
579 granularity = 0x10000;
580 addr |= TSI148_LCSR_ITAT_AS_A32;
581 break;
582 case VME_A64:
583 granularity = 0x10000;
584 addr |= TSI148_LCSR_ITAT_AS_A64;
585 break;
586 case VME_CRCSR:
587 case VME_USER1:
588 case VME_USER2:
589 case VME_USER3:
590 case VME_USER4:
591 default:
592 dev_err(tsi148_bridge->parent, "Invalid address space\n");
593 return -EINVAL;
594 break;
595 }
596
597 /* Convert 64-bit variables to 2x 32-bit variables */
598 reg_split(vme_base, &vme_base_high, &vme_base_low);
599
600 /*
601 * Bound address is a valid address for the window, adjust
602 * accordingly
603 */
604 vme_bound = vme_base + size - granularity;
605 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
606 pci_offset = (unsigned long long)pci_base - vme_base;
607 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
608
609 if (vme_base_low & (granularity - 1)) {
610 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
611 return -EINVAL;
612 }
613 if (vme_bound_low & (granularity - 1)) {
614 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
615 return -EINVAL;
616 }
617 if (pci_offset_low & (granularity - 1)) {
618 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
619 "alignment\n");
620 return -EINVAL;
621 }
622
623 /* Disable while we are mucking around */
624 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
625 TSI148_LCSR_OFFSET_ITAT);
626 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
627 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
628 TSI148_LCSR_OFFSET_ITAT);
629
630 /* Setup mapping */
631 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
632 TSI148_LCSR_OFFSET_ITSAU);
633 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
634 TSI148_LCSR_OFFSET_ITSAL);
635 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
636 TSI148_LCSR_OFFSET_ITEAU);
637 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITEAL);
639 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITOFU);
641 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
642 TSI148_LCSR_OFFSET_ITOFL);
643
644 /* Setup 2eSST speeds */
645 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
646 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
647 case VME_2eSST160:
648 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
649 break;
650 case VME_2eSST267:
651 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
652 break;
653 case VME_2eSST320:
654 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
655 break;
656 }
657
658 /* Setup cycle types */
659 temp_ctl &= ~(0x1F << 7);
660 if (cycle & VME_BLT)
661 temp_ctl |= TSI148_LCSR_ITAT_BLT;
662 if (cycle & VME_MBLT)
663 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
664 if (cycle & VME_2eVME)
665 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
666 if (cycle & VME_2eSST)
667 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
668 if (cycle & VME_2eSSTB)
669 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
670
671 /* Setup address space */
672 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
673 temp_ctl |= addr;
674
675 temp_ctl &= ~0xF;
676 if (cycle & VME_SUPER)
677 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
678 if (cycle & VME_USER)
679 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
680 if (cycle & VME_PROG)
681 temp_ctl |= TSI148_LCSR_ITAT_PGM;
682 if (cycle & VME_DATA)
683 temp_ctl |= TSI148_LCSR_ITAT_DATA;
684
685 /* Write ctl reg without enable */
686 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
687 TSI148_LCSR_OFFSET_ITAT);
688
689 if (enabled)
690 temp_ctl |= TSI148_LCSR_ITAT_EN;
691
692 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
693 TSI148_LCSR_OFFSET_ITAT);
694
695 return 0;
696 }
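/*
 * Rough usage sketch (assumes the vme_slave_* and vme_alloc_consistent()
 * helpers exported by the VME core; names come from the in-tree VME API,
 * not from this file, and "vdev" is a client-side placeholder): a client
 * normally reaches tsi148_slave_set() through the core, e.g.:
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf;
 *	void *vaddr;
 *
 *	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	vaddr = vme_alloc_consistent(res, 0x10000, &buf);
 *	// Expose the 64KB buffer at VME A24 address 0x400000 (note the
 *	// 0x1000 granularity requirement for A24 enforced above).
 *	vme_slave_set(res, 1, 0x400000, 0x10000, buf, VME_A24,
 *		      VME_SCT | VME_USER | VME_DATA);
 */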
697
698 /*
699 * Get slave window configuration.
700 */
701 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
702 unsigned long long *vme_base, unsigned long long *size,
703 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
704 {
705 unsigned int i, granularity = 0, ctl = 0;
706 unsigned int vme_base_low, vme_base_high;
707 unsigned int vme_bound_low, vme_bound_high;
708 unsigned int pci_offset_low, pci_offset_high;
709 unsigned long long vme_bound, pci_offset;
710 struct tsi148_driver *bridge;
711
712 bridge = image->parent->driver_priv;
713
714 i = image->number;
715
716 /* Read registers */
717 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
718 TSI148_LCSR_OFFSET_ITAT);
719
720 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
721 TSI148_LCSR_OFFSET_ITSAU);
722 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
723 TSI148_LCSR_OFFSET_ITSAL);
724 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
725 TSI148_LCSR_OFFSET_ITEAU);
726 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
727 TSI148_LCSR_OFFSET_ITEAL);
728 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
729 TSI148_LCSR_OFFSET_ITOFU);
730 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
731 TSI148_LCSR_OFFSET_ITOFL);
732
733 /* Combine 2x 32-bit variables into 64-bit variables */
734 reg_join(vme_base_high, vme_base_low, vme_base);
735 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
736 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
737
738 *pci_base = (dma_addr_t)vme_base + pci_offset;
739
740 *enabled = 0;
741 *aspace = 0;
742 *cycle = 0;
743
744 if (ctl & TSI148_LCSR_ITAT_EN)
745 *enabled = 1;
746
747 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
748 granularity = 0x10;
749 *aspace |= VME_A16;
750 }
751 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
752 granularity = 0x1000;
753 *aspace |= VME_A24;
754 }
755 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
756 granularity = 0x10000;
757 *aspace |= VME_A32;
758 }
759 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
760 granularity = 0x10000;
761 *aspace |= VME_A64;
762 }
763
764 /* Need granularity before we set the size */
765 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
766
767
768 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
769 *cycle |= VME_2eSST160;
770 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
771 *cycle |= VME_2eSST267;
772 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
773 *cycle |= VME_2eSST320;
774
775 if (ctl & TSI148_LCSR_ITAT_BLT)
776 *cycle |= VME_BLT;
777 if (ctl & TSI148_LCSR_ITAT_MBLT)
778 *cycle |= VME_MBLT;
779 if (ctl & TSI148_LCSR_ITAT_2eVME)
780 *cycle |= VME_2eVME;
781 if (ctl & TSI148_LCSR_ITAT_2eSST)
782 *cycle |= VME_2eSST;
783 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
784 *cycle |= VME_2eSSTB;
785
786 if (ctl & TSI148_LCSR_ITAT_SUPR)
787 *cycle |= VME_SUPER;
788 if (ctl & TSI148_LCSR_ITAT_NPRIV)
789 *cycle |= VME_USER;
790 if (ctl & TSI148_LCSR_ITAT_PGM)
791 *cycle |= VME_PROG;
792 if (ctl & TSI148_LCSR_ITAT_DATA)
793 *cycle |= VME_DATA;
794
795 return 0;
796 }
797
798 /*
799 * Allocate and map PCI Resource
800 */
801 static int tsi148_alloc_resource(struct vme_master_resource *image,
802 unsigned long long size)
803 {
804 unsigned long long existing_size;
805 int retval = 0;
806 struct pci_dev *pdev;
807 struct vme_bridge *tsi148_bridge;
808
809 tsi148_bridge = image->parent;
810
811 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
812
813 existing_size = (unsigned long long)(image->bus_resource.end -
814 image->bus_resource.start);
815
816 /* If the existing size is OK, return */
817 if ((size != 0) && (existing_size == (size - 1)))
818 return 0;
819
820 if (existing_size != 0) {
821 iounmap(image->kern_base);
822 image->kern_base = NULL;
823 kfree(image->bus_resource.name);
824 release_resource(&image->bus_resource);
825 memset(&image->bus_resource, 0, sizeof(struct resource));
826 }
827
828 /* Exit here if size is zero */
829 if (size == 0)
830 return 0;
831
832 if (image->bus_resource.name == NULL) {
833 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
834 if (image->bus_resource.name == NULL) {
835 dev_err(tsi148_bridge->parent, "Unable to allocate "
836 "memory for resource name\n");
837 retval = -ENOMEM;
838 goto err_name;
839 }
840 }
841
842 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
843 image->number);
844
845 image->bus_resource.start = 0;
846 image->bus_resource.end = (unsigned long)size;
847 image->bus_resource.flags = IORESOURCE_MEM;
848
849 retval = pci_bus_alloc_resource(pdev->bus,
850 &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
851 0, NULL, NULL);
852 if (retval) {
853 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
854 "resource for window %d size 0x%lx start 0x%lx\n",
855 image->number, (unsigned long)size,
856 (unsigned long)image->bus_resource.start);
857 goto err_resource;
858 }
859
860 image->kern_base = ioremap_nocache(
861 image->bus_resource.start, size);
862 if (image->kern_base == NULL) {
863 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
864 retval = -ENOMEM;
865 goto err_remap;
866 }
867
868 return 0;
869
870 err_remap:
871 release_resource(&image->bus_resource);
872 err_resource:
873 kfree(image->bus_resource.name);
874 memset(&image->bus_resource, 0, sizeof(struct resource));
875 err_name:
876 return retval;
877 }
878
879 /*
880 * Free and unmap PCI Resource
881 */
882 static void tsi148_free_resource(struct vme_master_resource *image)
883 {
884 iounmap(image->kern_base);
885 image->kern_base = NULL;
886 release_resource(&image->bus_resource);
887 kfree(image->bus_resource.name);
888 memset(&image->bus_resource, 0, sizeof(struct resource));
889 }
890
891 /*
892 * Set the attributes of an outbound window.
893 */
894 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
895 unsigned long long vme_base, unsigned long long size, u32 aspace,
896 u32 cycle, u32 dwidth)
897 {
898 int retval = 0;
899 unsigned int i;
900 unsigned int temp_ctl = 0;
901 unsigned int pci_base_low, pci_base_high;
902 unsigned int pci_bound_low, pci_bound_high;
903 unsigned int vme_offset_low, vme_offset_high;
904 unsigned long long pci_bound, vme_offset, pci_base;
905 struct vme_bridge *tsi148_bridge;
906 struct tsi148_driver *bridge;
907
908 tsi148_bridge = image->parent;
909
910 bridge = tsi148_bridge->driver_priv;
911
912 /* Verify input data */
913 if (vme_base & 0xFFFF) {
914 dev_err(tsi148_bridge->parent, "Invalid VME Window "
915 "alignment\n");
916 retval = -EINVAL;
917 goto err_window;
918 }
919
920 if ((size == 0) && (enabled != 0)) {
921 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
922 "enabled windows\n");
923 retval = -EINVAL;
924 goto err_window;
925 }
926
927 spin_lock(&image->lock);
928
929 /* Let's allocate the resource here rather than further up the stack as
930 * it avoids pushing loads of bus dependent stuff up the stack. If size
931 * is zero, any existing resource will be freed.
932 */
933 retval = tsi148_alloc_resource(image, size);
934 if (retval) {
935 spin_unlock(&image->lock);
936 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
937 "resource\n");
938 goto err_res;
939 }
940
941 if (size == 0) {
942 pci_base = 0;
943 pci_bound = 0;
944 vme_offset = 0;
945 } else {
946 pci_base = (unsigned long long)image->bus_resource.start;
947
948 /*
949 * Bound address is a valid address for the window, adjust
950 * according to window granularity.
951 */
952 pci_bound = pci_base + (size - 0x10000);
953 vme_offset = vme_base - pci_base;
954 }
955
956 /* Convert 64-bit variables to 2x 32-bit variables */
957 reg_split(pci_base, &pci_base_high, &pci_base_low);
958 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
959 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
960
961 if (pci_base_low & 0xFFFF) {
962 spin_unlock(&image->lock);
963 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
964 retval = -EINVAL;
965 goto err_gran;
966 }
967 if (pci_bound_low & 0xFFFF) {
968 spin_unlock(&image->lock);
969 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
970 retval = -EINVAL;
971 goto err_gran;
972 }
973 if (vme_offset_low & 0xFFFF) {
974 spin_unlock(&image->lock);
975 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
976 "alignment\n");
977 retval = -EINVAL;
978 goto err_gran;
979 }
980
981 i = image->number;
982
983 /* Disable while we are mucking around */
984 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
985 TSI148_LCSR_OFFSET_OTAT);
986 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
987 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
988 TSI148_LCSR_OFFSET_OTAT);
989
990 /* Setup 2eSST speeds */
991 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
992 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
993 case VME_2eSST160:
994 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
995 break;
996 case VME_2eSST267:
997 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
998 break;
999 case VME_2eSST320:
1000 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1001 break;
1002 }
1003
1004 /* Setup cycle types */
1005 if (cycle & VME_BLT) {
1006 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1007 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1008 }
1009 if (cycle & VME_MBLT) {
1010 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1011 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1012 }
1013 if (cycle & VME_2eVME) {
1014 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1015 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1016 }
1017 if (cycle & VME_2eSST) {
1018 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1019 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1020 }
1021 if (cycle & VME_2eSSTB) {
1022 dev_warn(tsi148_bridge->parent, "Currently not setting "
1023 "Broadcast Select Registers\n");
1024 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1025 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1026 }
1027
1028 /* Setup data width */
1029 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1030 switch (dwidth) {
1031 case VME_D16:
1032 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1033 break;
1034 case VME_D32:
1035 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1036 break;
1037 default:
1038 spin_unlock(&image->lock);
1039 dev_err(tsi148_bridge->parent, "Invalid data width\n");
1040 retval = -EINVAL;
1041 goto err_dwidth;
1042 }
1043
1044 /* Setup address space */
1045 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1046 switch (aspace) {
1047 case VME_A16:
1048 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1049 break;
1050 case VME_A24:
1051 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1052 break;
1053 case VME_A32:
1054 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1055 break;
1056 case VME_A64:
1057 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1058 break;
1059 case VME_CRCSR:
1060 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1061 break;
1062 case VME_USER1:
1063 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1064 break;
1065 case VME_USER2:
1066 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1067 break;
1068 case VME_USER3:
1069 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1070 break;
1071 case VME_USER4:
1072 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1073 break;
1074 default:
1075 spin_unlock(&image->lock);
1076 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1077 retval = -EINVAL;
1078 goto err_aspace;
1079 break;
1080 }
1081
1082 temp_ctl &= ~(3<<4);
1083 if (cycle & VME_SUPER)
1084 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1085 if (cycle & VME_PROG)
1086 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1087
1088 /* Setup mapping */
1089 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1090 TSI148_LCSR_OFFSET_OTSAU);
1091 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1092 TSI148_LCSR_OFFSET_OTSAL);
1093 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1094 TSI148_LCSR_OFFSET_OTEAU);
1095 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1096 TSI148_LCSR_OFFSET_OTEAL);
1097 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1098 TSI148_LCSR_OFFSET_OTOFU);
1099 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1100 TSI148_LCSR_OFFSET_OTOFL);
1101
1102 /* Write ctl reg without enable */
1103 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1104 TSI148_LCSR_OFFSET_OTAT);
1105
1106 if (enabled)
1107 temp_ctl |= TSI148_LCSR_OTAT_EN;
1108
1109 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1110 TSI148_LCSR_OFFSET_OTAT);
1111
1112 spin_unlock(&image->lock);
1113 return 0;
1114
1115 err_aspace:
1116 err_dwidth:
1117 err_gran:
1118 tsi148_free_resource(image);
1119 err_res:
1120 err_window:
1121 return retval;
1122
1123 }
1124
1125 /*
1126 * Get the attributes of an outbound window.
1127 *
1128 * XXX Not parsing prefetch information.
1129 */
1130 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1131 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1132 u32 *cycle, u32 *dwidth)
1133 {
1134 unsigned int i, ctl;
1135 unsigned int pci_base_low, pci_base_high;
1136 unsigned int pci_bound_low, pci_bound_high;
1137 unsigned int vme_offset_low, vme_offset_high;
1138
1139 unsigned long long pci_base, pci_bound, vme_offset;
1140 struct tsi148_driver *bridge;
1141
1142 bridge = image->parent->driver_priv;
1143
1144 i = image->number;
1145
1146 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1147 TSI148_LCSR_OFFSET_OTAT);
1148
1149 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1150 TSI148_LCSR_OFFSET_OTSAU);
1151 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1152 TSI148_LCSR_OFFSET_OTSAL);
1153 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1154 TSI148_LCSR_OFFSET_OTEAU);
1155 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1156 TSI148_LCSR_OFFSET_OTEAL);
1157 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1158 TSI148_LCSR_OFFSET_OTOFU);
1159 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1160 TSI148_LCSR_OFFSET_OTOFL);
1161
1162 /* Combine 2x 32-bit variables into 64-bit variables */
1163 reg_join(pci_base_high, pci_base_low, &pci_base);
1164 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1165 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1166
1167 *vme_base = pci_base + vme_offset;
1168 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1169
1170 *enabled = 0;
1171 *aspace = 0;
1172 *cycle = 0;
1173 *dwidth = 0;
1174
1175 if (ctl & TSI148_LCSR_OTAT_EN)
1176 *enabled = 1;
1177
1178 /* Setup address space */
1179 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1180 *aspace |= VME_A16;
1181 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1182 *aspace |= VME_A24;
1183 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1184 *aspace |= VME_A32;
1185 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1186 *aspace |= VME_A64;
1187 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1188 *aspace |= VME_CRCSR;
1189 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1190 *aspace |= VME_USER1;
1191 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1192 *aspace |= VME_USER2;
1193 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1194 *aspace |= VME_USER3;
1195 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1196 *aspace |= VME_USER4;
1197
1198 /* Setup 2eSST speeds */
1199 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1200 *cycle |= VME_2eSST160;
1201 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1202 *cycle |= VME_2eSST267;
1203 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1204 *cycle |= VME_2eSST320;
1205
1206 /* Setup cycle types */
1207 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1208 *cycle |= VME_SCT;
1209 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1210 *cycle |= VME_BLT;
1211 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1212 *cycle |= VME_MBLT;
1213 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1214 *cycle |= VME_2eVME;
1215 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1216 *cycle |= VME_2eSST;
1217 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1218 *cycle |= VME_2eSSTB;
1219
1220 if (ctl & TSI148_LCSR_OTAT_SUP)
1221 *cycle |= VME_SUPER;
1222 else
1223 *cycle |= VME_USER;
1224
1225 if (ctl & TSI148_LCSR_OTAT_PGM)
1226 *cycle |= VME_PROG;
1227 else
1228 *cycle |= VME_DATA;
1229
1230 /* Setup data width */
1231 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1232 *dwidth = VME_D16;
1233 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1234 *dwidth = VME_D32;
1235
1236 return 0;
1237 }
1238
1239
1240 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1241 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1242 u32 *cycle, u32 *dwidth)
1243 {
1244 int retval;
1245
1246 spin_lock(&image->lock);
1247
1248 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1249 cycle, dwidth);
1250
1251 spin_unlock(&image->lock);
1252
1253 return retval;
1254 }
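/*
 * Rough usage sketch (assumes the vme_master_* helpers exported by the VME
 * core; "vdev" is a client-side placeholder): the outbound window callbacks
 * in this file are normally reached through the core rather than called
 * directly, e.g.:
 *
 *	struct vme_resource *res;
 *	u8 buf[64];
 *
 *	res = vme_master_request(vdev, VME_A32, VME_SCT | VME_USER | VME_DATA,
 *				 VME_D32);
 *	// 1MB window onto VME A32 space at 0x20000000 (64KB aligned, as
 *	// required by tsi148_master_set() above).
 *	vme_master_set(res, 1, 0x20000000, 0x100000, VME_A32,
 *		       VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	vme_master_read(res, buf, sizeof(buf), 0);
 */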
1255
1256 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1257 size_t count, loff_t offset)
1258 {
1259 int retval, enabled;
1260 unsigned long long vme_base, size;
1261 u32 aspace, cycle, dwidth;
1262 struct vme_bus_error *vme_err = NULL;
1263 struct vme_bridge *tsi148_bridge;
1264 void *addr = image->kern_base + offset;
1265 unsigned int done = 0;
1266 unsigned int count32;
1267
1268 tsi148_bridge = image->parent;
1269
1270 spin_lock(&image->lock);
1271
1272 /* The following code handles VME address alignment. We cannot use
1273 * memcpy_xxx directly here because it may cut small data transfers into
1274 * 8-bit cycles, making D16 cycles impossible.
1275 * On the other hand, the bridge itself assures that the maximum data
1276 * cycle configured for the transfer is used and splits it
1277 * automatically for non-aligned addresses, so we don't want the
1278 * overhead of needlessly forcing small transfers for the entire cycle.
1279 */
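/*
 * Worked example of the splitting below (illustrative only): a read of
 * count = 10 from a mapping whose kernel address has addr % 4 == 2 becomes
 *
 *	ioread16(addr);			// 2 bytes, reaches 4-byte alignment
 *	memcpy_fromio(.., .., 8);	// (count - done) & ~0x3 bytes
 *
 * with no trailing 16-bit or 8-bit accesses required.
 */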
1280 if ((uintptr_t)addr & 0x1) {
1281 *(u8 *)buf = ioread8(addr);
1282 done += 1;
1283 if (done == count)
1284 goto out;
1285 }
1286 if ((uintptr_t)addr & 0x2) {
1287 if ((count - done) < 2) {
1288 *(u8 *)(buf + done) = ioread8(addr + done);
1289 done += 1;
1290 goto out;
1291 } else {
1292 *(u16 *)(buf + done) = ioread16(addr + done);
1293 done += 2;
1294 }
1295 }
1296
1297 count32 = (count - done) & ~0x3;
1298 if (count32 > 0) {
1299 memcpy_fromio(buf + done, addr + done, count32);
1300 done += count32;
1301 }
1302
1303 if ((count - done) & 0x2) {
1304 *(u16 *)(buf + done) = ioread16(addr + done);
1305 done += 2;
1306 }
1307 if ((count - done) & 0x1) {
1308 *(u8 *)(buf + done) = ioread8(addr + done);
1309 done += 1;
1310 }
1311
1312 out:
1313 retval = count;
1314
1315 if (!err_chk)
1316 goto skip_chk;
1317
1318 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1319 &dwidth);
1320
1321 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1322 count);
1323 if (vme_err != NULL) {
1324 dev_err(image->parent->parent, "First VME read error detected "
1325 "an at address 0x%llx\n", vme_err->address);
1326 retval = vme_err->address - (vme_base + offset);
1327 /* Clear down saved errors in this address range */
1328 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1329 count);
1330 }
1331
1332 skip_chk:
1333 spin_unlock(&image->lock);
1334
1335 return retval;
1336 }
1337
1338
1339 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1340 size_t count, loff_t offset)
1341 {
1342 int retval = 0, enabled;
1343 unsigned long long vme_base, size;
1344 u32 aspace, cycle, dwidth;
1345 void *addr = image->kern_base + offset;
1346 unsigned int done = 0;
1347 unsigned int count32;
1348
1349 struct vme_bus_error *vme_err = NULL;
1350 struct vme_bridge *tsi148_bridge;
1351 struct tsi148_driver *bridge;
1352
1353 tsi148_bridge = image->parent;
1354
1355 bridge = tsi148_bridge->driver_priv;
1356
1357 spin_lock(&image->lock);
1358
1359 /* Here we apply the same strategy as in the master_read
1360 * function in order to ensure D16 cycles when required.
1361 */
1362 if ((uintptr_t)addr & 0x1) {
1363 iowrite8(*(u8 *)buf, addr);
1364 done += 1;
1365 if (done == count)
1366 goto out;
1367 }
1368 if ((uintptr_t)addr & 0x2) {
1369 if ((count - done) < 2) {
1370 iowrite8(*(u8 *)(buf + done), addr + done);
1371 done += 1;
1372 goto out;
1373 } else {
1374 iowrite16(*(u16 *)(buf + done), addr + done);
1375 done += 2;
1376 }
1377 }
1378
1379 count32 = (count - done) & ~0x3;
1380 if (count32 > 0) {
1381 memcpy_toio(addr + done, buf + done, count32);
1382 done += count32;
1383 }
1384
1385 if ((count - done) & 0x2) {
1386 iowrite16(*(u16 *)(buf + done), addr + done);
1387 done += 2;
1388 }
1389 if ((count - done) & 0x1) {
1390 iowrite8(*(u8 *)(buf + done), addr + done);
1391 done += 1;
1392 }
1393
1394 out:
1395 retval = count;
1396
1397 /*
1398 * Writes are posted. We need to do a read on the VME bus to flush out
1399 * all of the writes before we check for errors. We can't guarantee
1400 * that reading the data we have just written is safe. It is believed
1401 * that there isn't any read/write re-ordering, so we can read any
1402 * location in VME space, so let's read the Device ID from the tsi148's
1403 * own registers as mapped into CR/CSR space.
1404 *
1405 * We check for saved errors in the written address range/space.
1406 */
1407
1408 if (!err_chk)
1409 goto skip_chk;
1410
1411 /*
1412 * Get window info first, to maximise the time that the buffers may
1413 * flush on their own
1414 */
1415 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1416 &dwidth);
1417
1418 ioread16(bridge->flush_image->kern_base + 0x7F000);
1419
1420 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1421 count);
1422 if (vme_err != NULL) {
1423 dev_warn(tsi148_bridge->parent, "First VME write error detected"
1424 " an at address 0x%llx\n", vme_err->address);
1425 retval = vme_err->address - (vme_base + offset);
1426 /* Clear down saved errors in this address range */
1427 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1428 count);
1429 }
1430
1431 skip_chk:
1432 spin_unlock(&image->lock);
1433
1434 return retval;
1435 }
1436
1437 /*
1438 * Perform an RMW cycle on the VME bus.
1439 *
1440 * Requires a previously configured master window, returns final value.
1441 */
1442 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1443 unsigned int mask, unsigned int compare, unsigned int swap,
1444 loff_t offset)
1445 {
1446 unsigned long long pci_addr;
1447 unsigned int pci_addr_high, pci_addr_low;
1448 u32 tmp, result;
1449 int i;
1450 struct tsi148_driver *bridge;
1451
1452 bridge = image->parent->driver_priv;
1453
1454 /* Find the PCI address that maps to the desired VME address */
1455 i = image->number;
1456
1457 /* Locking as we can only do one of these at a time */
1458 mutex_lock(&bridge->vme_rmw);
1459
1460 /* Lock image */
1461 spin_lock(&image->lock);
1462
1463 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1464 TSI148_LCSR_OFFSET_OTSAU);
1465 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1466 TSI148_LCSR_OFFSET_OTSAL);
1467
1468 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1469 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1470
1471 /* Configure registers */
1472 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1473 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1474 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1475 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1476 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1477
1478 /* Enable RMW */
1479 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1480 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1481 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1482
1483 /* Kick process off with a read to the required address. */
1484 result = ioread32be(image->kern_base + offset);
1485
1486 /* Disable RMW */
1487 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1488 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1489 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1490
1491 spin_unlock(&image->lock);
1492
1493 mutex_unlock(&bridge->vme_rmw);
1494
1495 return result;
1496 }
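/*
 * Rough usage sketch (assumes the vme_master_rmw() helper exported by the
 * VME core; the precise compare-and-swap semantics of mask/compare/swap are
 * those of the TSI148 RMW registers programmed above): given an already
 * configured master window resource "res" (a placeholder), a caller might
 * do:
 *
 *	// Ask the bridge to set bit 31 only where it compares equal to 0;
 *	// the return value is the data seen by the read that triggers the
 *	// RMW cycle.
 *	old = vme_master_rmw(res, 0x80000000, 0x00000000, 0x80000000, 0);
 */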
1497
1498 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1499 u32 aspace, u32 cycle, u32 dwidth)
1500 {
1501 u32 val;
1502
1503 val = be32_to_cpu(*attr);
1504
1505 /* Setup 2eSST speeds */
1506 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1507 case VME_2eSST160:
1508 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1509 break;
1510 case VME_2eSST267:
1511 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1512 break;
1513 case VME_2eSST320:
1514 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1515 break;
1516 }
1517
1518 /* Setup cycle types */
1519 if (cycle & VME_SCT)
1520 val |= TSI148_LCSR_DSAT_TM_SCT;
1521
1522 if (cycle & VME_BLT)
1523 val |= TSI148_LCSR_DSAT_TM_BLT;
1524
1525 if (cycle & VME_MBLT)
1526 val |= TSI148_LCSR_DSAT_TM_MBLT;
1527
1528 if (cycle & VME_2eVME)
1529 val |= TSI148_LCSR_DSAT_TM_2eVME;
1530
1531 if (cycle & VME_2eSST)
1532 val |= TSI148_LCSR_DSAT_TM_2eSST;
1533
1534 if (cycle & VME_2eSSTB) {
1535 dev_err(dev, "Currently not setting Broadcast Select "
1536 "Registers\n");
1537 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1538 }
1539
1540 /* Setup data width */
1541 switch (dwidth) {
1542 case VME_D16:
1543 val |= TSI148_LCSR_DSAT_DBW_16;
1544 break;
1545 case VME_D32:
1546 val |= TSI148_LCSR_DSAT_DBW_32;
1547 break;
1548 default:
1549 dev_err(dev, "Invalid data width\n");
1550 return -EINVAL;
1551 }
1552
1553 /* Setup address space */
1554 switch (aspace) {
1555 case VME_A16:
1556 val |= TSI148_LCSR_DSAT_AMODE_A16;
1557 break;
1558 case VME_A24:
1559 val |= TSI148_LCSR_DSAT_AMODE_A24;
1560 break;
1561 case VME_A32:
1562 val |= TSI148_LCSR_DSAT_AMODE_A32;
1563 break;
1564 case VME_A64:
1565 val |= TSI148_LCSR_DSAT_AMODE_A64;
1566 break;
1567 case VME_CRCSR:
1568 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1569 break;
1570 case VME_USER1:
1571 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1572 break;
1573 case VME_USER2:
1574 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1575 break;
1576 case VME_USER3:
1577 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1578 break;
1579 case VME_USER4:
1580 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1581 break;
1582 default:
1583 dev_err(dev, "Invalid address space\n");
1584 return -EINVAL;
1585 break;
1586 }
1587
1588 if (cycle & VME_SUPER)
1589 val |= TSI148_LCSR_DSAT_SUP;
1590 if (cycle & VME_PROG)
1591 val |= TSI148_LCSR_DSAT_PGM;
1592
1593 *attr = cpu_to_be32(val);
1594
1595 return 0;
1596 }
1597
1598 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1599 u32 aspace, u32 cycle, u32 dwidth)
1600 {
1601 u32 val;
1602
1603 val = be32_to_cpu(*attr);
1604
1605 /* Setup 2eSST speeds */
1606 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1607 case VME_2eSST160:
1608 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1609 break;
1610 case VME_2eSST267:
1611 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1612 break;
1613 case VME_2eSST320:
1614 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1615 break;
1616 }
1617
1618 /* Setup cycle types */
1619 if (cycle & VME_SCT)
1620 val |= TSI148_LCSR_DDAT_TM_SCT;
1621
1622 if (cycle & VME_BLT)
1623 val |= TSI148_LCSR_DDAT_TM_BLT;
1624
1625 if (cycle & VME_MBLT)
1626 val |= TSI148_LCSR_DDAT_TM_MBLT;
1627
1628 if (cycle & VME_2eVME)
1629 val |= TSI148_LCSR_DDAT_TM_2eVME;
1630
1631 if (cycle & VME_2eSST)
1632 val |= TSI148_LCSR_DDAT_TM_2eSST;
1633
1634 if (cycle & VME_2eSSTB) {
1635 dev_err(dev, "Currently not setting Broadcast Select "
1636 "Registers\n");
1637 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1638 }
1639
1640 /* Setup data width */
1641 switch (dwidth) {
1642 case VME_D16:
1643 val |= TSI148_LCSR_DDAT_DBW_16;
1644 break;
1645 case VME_D32:
1646 val |= TSI148_LCSR_DDAT_DBW_32;
1647 break;
1648 default:
1649 dev_err(dev, "Invalid data width\n");
1650 return -EINVAL;
1651 }
1652
1653 /* Setup address space */
1654 switch (aspace) {
1655 case VME_A16:
1656 val |= TSI148_LCSR_DDAT_AMODE_A16;
1657 break;
1658 case VME_A24:
1659 val |= TSI148_LCSR_DDAT_AMODE_A24;
1660 break;
1661 case VME_A32:
1662 val |= TSI148_LCSR_DDAT_AMODE_A32;
1663 break;
1664 case VME_A64:
1665 val |= TSI148_LCSR_DDAT_AMODE_A64;
1666 break;
1667 case VME_CRCSR:
1668 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1669 break;
1670 case VME_USER1:
1671 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1672 break;
1673 case VME_USER2:
1674 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1675 break;
1676 case VME_USER3:
1677 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1678 break;
1679 case VME_USER4:
1680 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1681 break;
1682 default:
1683 dev_err(dev, "Invalid address space\n");
1684 return -EINVAL;
1685 break;
1686 }
1687
1688 if (cycle & VME_SUPER)
1689 val |= TSI148_LCSR_DDAT_SUP;
1690 if (cycle & VME_PROG)
1691 val |= TSI148_LCSR_DDAT_PGM;
1692
1693 *attr = cpu_to_be32(val);
1694
1695 return 0;
1696 }
1697
1698 /*
1699 * Add a linked list descriptor to the list
1700 *
1701 * Note: DMA engine expects the DMA descriptor to be big endian.
1702 */
1703 static int tsi148_dma_list_add(struct vme_dma_list *list,
1704 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1705 {
1706 struct tsi148_dma_entry *entry, *prev;
1707 u32 address_high, address_low, val;
1708 struct vme_dma_pattern *pattern_attr;
1709 struct vme_dma_pci *pci_attr;
1710 struct vme_dma_vme *vme_attr;
1711 int retval = 0;
1712 struct vme_bridge *tsi148_bridge;
1713
1714 tsi148_bridge = list->parent->parent;
1715
1716 /* Descriptor must be aligned on 64-bit boundaries */
1717 entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1718 if (entry == NULL) {
1719 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1720 "dma resource structure\n");
1721 retval = -ENOMEM;
1722 goto err_mem;
1723 }
1724
1725 /* Test descriptor alignment */
1726 if ((unsigned long)&entry->descriptor & 0x7) {
1727 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1728 "byte boundary as required: %p\n",
1729 &entry->descriptor);
1730 retval = -EINVAL;
1731 goto err_align;
1732 }
1733
1734 /* Given we are going to fill out the structure, we probably don't
1735 * need to zero it, but better safe than sorry for now.
1736 */
1737 memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1738
1739 /* Fill out source part */
1740 switch (src->type) {
1741 case VME_DMA_PATTERN:
1742 pattern_attr = src->private;
1743
1744 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1745
1746 val = TSI148_LCSR_DSAT_TYP_PAT;
1747
1748 /* Default behaviour is 32 bit pattern */
1749 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1750 val |= TSI148_LCSR_DSAT_PSZ;
1751
1752 /* It seems that the default behaviour is to increment */
1753 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1754 val |= TSI148_LCSR_DSAT_NIN;
1755 entry->descriptor.dsat = cpu_to_be32(val);
1756 break;
1757 case VME_DMA_PCI:
1758 pci_attr = src->private;
1759
1760 reg_split((unsigned long long)pci_attr->address, &address_high,
1761 &address_low);
1762 entry->descriptor.dsau = cpu_to_be32(address_high);
1763 entry->descriptor.dsal = cpu_to_be32(address_low);
1764 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1765 break;
1766 case VME_DMA_VME:
1767 vme_attr = src->private;
1768
1769 reg_split((unsigned long long)vme_attr->address, &address_high,
1770 &address_low);
1771 entry->descriptor.dsau = cpu_to_be32(address_high);
1772 entry->descriptor.dsal = cpu_to_be32(address_low);
1773 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1774
1775 retval = tsi148_dma_set_vme_src_attributes(
1776 tsi148_bridge->parent, &entry->descriptor.dsat,
1777 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1778 if (retval < 0)
1779 goto err_source;
1780 break;
1781 default:
1782 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1783 retval = -EINVAL;
1784 goto err_source;
1785 break;
1786 }
1787
1788 /* Assume last link - this will be over-written by adding another */
1789 entry->descriptor.dnlau = cpu_to_be32(0);
1790 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1791
1792 /* Fill out destination part */
1793 switch (dest->type) {
1794 case VME_DMA_PCI:
1795 pci_attr = dest->private;
1796
1797 reg_split((unsigned long long)pci_attr->address, &address_high,
1798 &address_low);
1799 entry->descriptor.ddau = cpu_to_be32(address_high);
1800 entry->descriptor.ddal = cpu_to_be32(address_low);
1801 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1802 break;
1803 case VME_DMA_VME:
1804 vme_attr = dest->private;
1805
1806 reg_split((unsigned long long)vme_attr->address, &address_high,
1807 &address_low);
1808 entry->descriptor.ddau = cpu_to_be32(address_high);
1809 entry->descriptor.ddal = cpu_to_be32(address_low);
1810 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1811
1812 retval = tsi148_dma_set_vme_dest_attributes(
1813 tsi148_bridge->parent, &entry->descriptor.ddat,
1814 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1815 if (retval < 0)
1816 goto err_dest;
1817 break;
1818 default:
1819 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1820 retval = -EINVAL;
1821 goto err_dest;
1822 break;
1823 }
1824
1825 /* Fill out count */
1826 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1827
1828 /* Add to list */
1829 list_add_tail(&entry->list, &list->entries);
1830
1831 /* Fill out previous descriptors "Next Address" */
1832 if (entry->list.prev != &list->entries) {
1833 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1834 list);
1835 /* We need the bus address for the pointer */
1836 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1837 &entry->descriptor,
1838 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1839
1840 reg_split((unsigned long long)entry->dma_handle, &address_high,
1841 &address_low);
1842 entry->descriptor.dnlau = cpu_to_be32(address_high);
1843 entry->descriptor.dnlal = cpu_to_be32(address_low);
1844
1845 }
1846
1847 return 0;
1848
1849 err_dest:
1850 err_source:
1851 err_align:
1852 kfree(entry);
1853 err_mem:
1854 return retval;
1855 }
1856
1857 /*
1858 * Check to see if the provided DMA channel is busy.
1859 */
1860 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1861 {
1862 u32 tmp;
1863 struct tsi148_driver *bridge;
1864
1865 bridge = tsi148_bridge->driver_priv;
1866
1867 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1868 TSI148_LCSR_OFFSET_DSTA);
1869
1870 if (tmp & TSI148_LCSR_DSTA_BSY)
1871 return 0;
1872 else
1873 return 1;
1874
1875 }
1876
1877 /*
1878 * Execute a previously generated link list
1879 *
1880 * XXX Need to provide control register configuration.
1881 */
1882 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1883 {
1884 struct vme_dma_resource *ctrlr;
1885 int channel, retval = 0;
1886 struct tsi148_dma_entry *entry;
1887 u32 bus_addr_high, bus_addr_low;
1888 u32 val, dctlreg = 0;
1889 struct vme_bridge *tsi148_bridge;
1890 struct tsi148_driver *bridge;
1891
1892 ctrlr = list->parent;
1893
1894 tsi148_bridge = ctrlr->parent;
1895
1896 bridge = tsi148_bridge->driver_priv;
1897
1898 mutex_lock(&ctrlr->mtx);
1899
1900 channel = ctrlr->number;
1901
1902 if (!list_empty(&ctrlr->running)) {
1903 /*
1904 * XXX We have an active DMA transfer and currently haven't
1905 * sorted out the mechanism for "pending" DMA transfers.
1906 * Return busy.
1907 */
1908 /* Need to add to pending here */
1909 mutex_unlock(&ctrlr->mtx);
1910 return -EBUSY;
1911 } else {
1912 list_add(&list->list, &ctrlr->running);
1913 }
1914
1915 /* Get first bus address and write into registers */
1916 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1917 list);
1918
1919 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1920 &entry->descriptor,
1921 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1922
1923 mutex_unlock(&ctrlr->mtx);
1924
1925 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1926
1927 iowrite32be(bus_addr_high, bridge->base +
1928 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1929 iowrite32be(bus_addr_low, bridge->base +
1930 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1931
1932 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1933 TSI148_LCSR_OFFSET_DCTL);
1934
1935 /* Start the operation */
1936 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1937 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1938
1939 wait_event_interruptible(bridge->dma_queue[channel],
1940 tsi148_dma_busy(ctrlr->parent, channel));
1941
1942 /*
1943 * Read status register, this register is valid until we kick off a
1944 * new transfer.
1945 */
1946 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1947 TSI148_LCSR_OFFSET_DSTA);
1948
1949 if (val & TSI148_LCSR_DSTA_VBE) {
1950 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1951 retval = -EIO;
1952 }
1953
1954 /* Remove list from running list */
1955 mutex_lock(&ctrlr->mtx);
1956 list_del(&list->list);
1957 mutex_unlock(&ctrlr->mtx);
1958
1959 return retval;
1960 }
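#if 0
/*
 * Illustrative sketch only - not part of the driver and kept out of the
 * build. It shows roughly how a VME client driver could exercise the
 * dma_list_add/dma_list_exec operations above through the vme_* helpers
 * declared in <linux/vme.h>. The vme_dev pointer, the PCI buffer handle
 * and the VME address are assumptions supplied by the caller; error
 * handling is kept minimal.
 */
static int example_dma_to_vme(struct vme_dev *vdev, dma_addr_t buf_bus,
	unsigned long long vme_addr, size_t count)
{
	struct vme_resource *res;
	struct vme_dma_list *dma_list;
	struct vme_dma_attr *src, *dest;
	int retval;

	res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
	if (!res)
		return -ENODEV;

	dma_list = vme_new_dma_list(res);
	if (!dma_list) {
		vme_dma_free(res);
		return -ENOMEM;
	}

	src = vme_dma_pci_attribute(buf_bus);
	dest = vme_dma_vme_attribute(vme_addr, VME_A32, VME_SCT, VME_D32);
	if (src && dest) {
		/* Builds one descriptor via tsi148_dma_list_add() */
		retval = vme_dma_list_add(dma_list, src, dest, count);
		if (!retval)
			/* Kicks the channel and sleeps on dma_queue[] */
			retval = vme_dma_list_exec(dma_list);
	} else {
		retval = -ENOMEM;
	}

	if (src)
		vme_dma_free_attribute(src);
	if (dest)
		vme_dma_free_attribute(dest);
	vme_dma_list_free(dma_list);
	vme_dma_free(res);

	return retval;
}
#endif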
1961
1962 /*
1963 * Clean up a previously generated linked list
1964 *
1965 * This is a separate function; do not assume that the chain cannot be reused.
1966 */
1967 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1968 {
1969 struct list_head *pos, *temp;
1970 struct tsi148_dma_entry *entry;
1971
1972 struct vme_bridge *tsi148_bridge = list->parent->parent;
1973
1974 /* detach and free each entry */
1975 list_for_each_safe(pos, temp, &list->entries) {
1976 list_del(pos);
1977 entry = list_entry(pos, struct tsi148_dma_entry, list);
1978
1979 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1980 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1981 kfree(entry);
1982 }
1983
1984 return 0;
1985 }
1986
1987 /*
1988 * All 4 location monitors reside at the same base - this is therefore a
1989 * system wide configuration.
1990 *
1991 * This does not enable the LM monitor - that should be done when the first
1992 * callback is attached and disabled when the last callback is removed.
1993 */
1994 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1995 u32 aspace, u32 cycle)
1996 {
1997 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1998 int i;
1999 struct vme_bridge *tsi148_bridge;
2000 struct tsi148_driver *bridge;
2001
2002 tsi148_bridge = lm->parent;
2003
2004 bridge = tsi148_bridge->driver_priv;
2005
2006 mutex_lock(&lm->mtx);
2007
2008 /* If we already have a callback attached, we can't move it! */
2009 for (i = 0; i < lm->monitors; i++) {
2010 if (bridge->lm_callback[i] != NULL) {
2011 mutex_unlock(&lm->mtx);
2012 dev_err(tsi148_bridge->parent, "Location monitor "
2013 "callback attached, can't reset\n");
2014 return -EBUSY;
2015 }
2016 }
2017
2018 switch (aspace) {
2019 case VME_A16:
2020 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
2021 break;
2022 case VME_A24:
2023 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
2024 break;
2025 case VME_A32:
2026 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
2027 break;
2028 case VME_A64:
2029 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
2030 break;
2031 default:
2032 mutex_unlock(&lm->mtx);
2033 dev_err(tsi148_bridge->parent, "Invalid address space\n");
2034 return -EINVAL;
2036 }
2037
2038 if (cycle & VME_SUPER)
2039 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
2040 if (cycle & VME_USER)
2041 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
2042 if (cycle & VME_PROG)
2043 lm_ctl |= TSI148_LCSR_LMAT_PGM;
2044 if (cycle & VME_DATA)
2045 lm_ctl |= TSI148_LCSR_LMAT_DATA;
2046
2047 reg_split(lm_base, &lm_base_high, &lm_base_low);
2048
2049 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
2050 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
2051 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2052
2053 mutex_unlock(&lm->mtx);
2054
2055 return 0;
2056 }
2057
2058 /* Get the configuration of the location monitor and return whether it is
2059 * enabled or disabled.
2060 */
2061 static int tsi148_lm_get(struct vme_lm_resource *lm,
2062 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
2063 {
2064 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2065 struct tsi148_driver *bridge;
2066
2067 bridge = lm->parent->driver_priv;
2068
2069 mutex_lock(&lm->mtx);
2070
2071 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2072 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2073 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2074
2075 reg_join(lm_base_high, lm_base_low, lm_base);
2076
2077 if (lm_ctl & TSI148_LCSR_LMAT_EN)
2078 enabled = 1;
2079
2080 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2081 *aspace |= VME_A16;
2082
2083 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2084 *aspace |= VME_A24;
2085
2086 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2087 *aspace |= VME_A32;
2088
2089 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2090 *aspace |= VME_A64;
2091
2093 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2094 *cycle |= VME_SUPER;
2095 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2096 *cycle |= VME_USER;
2097 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2098 *cycle |= VME_PROG;
2099 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2100 *cycle |= VME_DATA;
2101
2102 mutex_unlock(&lm->mtx);
2103
2104 return enabled;
2105 }
2106
2107 /*
2108 * Attach a callback to a specific location monitor.
2109 *
2110 * Callback will be passed the monitor triggered.
2111 */
2112 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2113 void (*callback)(int))
2114 {
2115 u32 lm_ctl, tmp;
2116 struct vme_bridge *tsi148_bridge;
2117 struct tsi148_driver *bridge;
2118
2119 tsi148_bridge = lm->parent;
2120
2121 bridge = tsi148_bridge->driver_priv;
2122
2123 mutex_lock(&lm->mtx);
2124
2125 /* Ensure that the location monitor is configured - need PGM or DATA */
2126 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2127 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2128 mutex_unlock(&lm->mtx);
2129 dev_err(tsi148_bridge->parent, "Location monitor not properly "
2130 "configured\n");
2131 return -EINVAL;
2132 }
2133
2134 /* Check that a callback isn't already attached */
2135 if (bridge->lm_callback[monitor] != NULL) {
2136 mutex_unlock(&lm->mtx);
2137 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2138 return -EBUSY;
2139 }
2140
2141 /* Attach callback */
2142 bridge->lm_callback[monitor] = callback;
2143
2144 /* Enable Location Monitor interrupt */
2145 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2146 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2147 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2148
2149 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2150 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2151 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2152
2153 /* Ensure that the global Location Monitor Enable bit is set */
2154 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2155 lm_ctl |= TSI148_LCSR_LMAT_EN;
2156 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2157 }
2158
2159 mutex_unlock(&lm->mtx);
2160
2161 return 0;
2162 }
2163
2164 /*
2165 * Detach a callback function from a specific location monitor.
2166 */
2167 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2168 {
2169 u32 lm_en, tmp;
2170 struct tsi148_driver *bridge;
2171
2172 bridge = lm->parent->driver_priv;
2173
2174 mutex_lock(&lm->mtx);
2175
2176 /* Disable Location Monitor and ensure previous interrupts are clear */
2177 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2178 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2179 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2180
2181 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2182 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2183 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2184
2185 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2186 bridge->base + TSI148_LCSR_INTC);
2187
2188 /* Detach callback */
2189 bridge->lm_callback[monitor] = NULL;
2190
2191 /* If all location monitors are disabled, disable the global Location Monitor */
2192 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2193 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2194 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2195 tmp &= ~TSI148_LCSR_LMAT_EN;
2196 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2197 }
2198
2199 mutex_unlock(&lm->mtx);
2200
2201 return 0;
2202 }
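#if 0
/*
 * Illustrative sketch only - not part of the driver and kept out of the
 * build. It shows the location monitor call sequence a VME client driver
 * could use against the lm_set/lm_attach/lm_detach operations above, via
 * the vme_lm_* helpers declared in <linux/vme.h>. The vme_dev pointer,
 * the A16 base address and the callback are assumptions chosen for the
 * example.
 */
static void example_lm_callback(int monitor)
{
	pr_info("location monitor %d triggered\n", monitor);
}

static int example_lm_setup(struct vme_dev *vdev)
{
	struct vme_resource *res;
	int retval;

	res = vme_lm_request(vdev);
	if (!res)
		return -ENODEV;

	/* Program the (system-wide) comparator base, then arm monitor 0 */
	retval = vme_lm_set(res, 0x6000, VME_A16, VME_USER | VME_DATA);
	if (!retval)
		retval = vme_lm_attach(res, 0, example_lm_callback);
	if (retval) {
		vme_lm_free(res);
		return retval;
	}

	/* ... on teardown ... */
	vme_lm_detach(res, 0);
	vme_lm_free(res);

	return 0;
}
#endif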
2203
2204 /*
2205 * Determine Geographical Addressing
2206 */
2207 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2208 {
2209 u32 slot = 0;
2210 struct tsi148_driver *bridge;
2211
2212 bridge = tsi148_bridge->driver_priv;
2213
2214 if (!geoid) {
2215 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2216 slot = slot & TSI148_LCSR_VSTAT_GA_M;
	} else {
		slot = geoid;
	}
2219
2220 return (int)slot;
2221 }
2222
2223 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2224 dma_addr_t *dma)
2225 {
2226 struct pci_dev *pdev;
2227
2228 /* Find pci_dev container of dev */
2229 pdev = container_of(parent, struct pci_dev, dev);
2230
2231 return pci_alloc_consistent(pdev, size, dma);
2232 }
2233
2234 static void tsi148_free_consistent(struct device *parent, size_t size,
2235 void *vaddr, dma_addr_t dma)
2236 {
2237 struct pci_dev *pdev;
2238
2239 /* Find pci_dev container of dev */
2240 pdev = container_of(parent, struct pci_dev, dev);
2241
2242 pci_free_consistent(pdev, size, vaddr, dma);
2243 }
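#if 0
/*
 * Illustrative sketch only - not part of the driver and kept out of the
 * build. pci_alloc_consistent()/pci_free_consistent() are the legacy
 * wrappers around the generic DMA API, so the two helpers above could
 * equivalently be written against dma_alloc_coherent()/dma_free_coherent()
 * and skip the container_of() step (<linux/dma-mapping.h> is already
 * included by this driver).
 */
static void *example_alloc_consistent(struct device *parent, size_t size,
	dma_addr_t *dma)
{
	/* The legacy PCI wrapper uses GFP_ATOMIC; mirror that here */
	return dma_alloc_coherent(parent, size, dma, GFP_ATOMIC);
}

static void example_free_consistent(struct device *parent, size_t size,
	void *vaddr, dma_addr_t dma)
{
	dma_free_coherent(parent, size, vaddr, dma);
}
#endif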
2244
2245 /*
2246 * Configure CR/CSR space
2247 *
2248 * Access to the CR/CSR can be configured at power-up. The location of the
2249 * CR/CSR registers in the CR/CSR address space is determined by the board's
2250 * Auto-ID or Geographic address. This function ensures that the window is
2251 * enabled at an offset consistent with the board's geographic address.
2252 *
2253 * Each board has a 512kB window, with the highest 4kB being used for the
2254 * board's registers; this means there is a fixed-length 508kB window which
2255 * must be mapped onto PCI memory.
2256 */
2257 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2258 struct pci_dev *pdev)
2259 {
2260 u32 cbar, crat, vstat;
2261 u32 crcsr_bus_high, crcsr_bus_low;
2262 int retval;
2263 struct tsi148_driver *bridge;
2264
2265 bridge = tsi148_bridge->driver_priv;
2266
2267 /* Allocate mem for CR/CSR image */
2268 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2269 &bridge->crcsr_bus);
2270 if (bridge->crcsr_kernel == NULL) {
2271 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2272 "CR/CSR image\n");
2273 return -ENOMEM;
2274 }
2275
2276 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2277
2278 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2279
2280 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2281 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2282
2283 /* Ensure that the CR/CSR is configured at the correct offset */
2284 cbar = ioread32be(bridge->base + TSI148_CBAR);
2285 cbar = (cbar & TSI148_CRCSR_CBAR_M) >> 3;
2286
2287 vstat = tsi148_slot_get(tsi148_bridge);
2288
2289 if (cbar != vstat) {
2290 cbar = vstat;
2291 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2292 iowrite32be(cbar << 3, bridge->base + TSI148_CBAR);
2293 }
2294 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2295
2296 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2297 if (crat & TSI148_LCSR_CRAT_EN) {
2298 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2299 } else {
2300 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2301 iowrite32be(crat | TSI148_LCSR_CRAT_EN, bridge->base + TSI148_LCSR_CRAT);
2302 }
2303
2304 /* If we want flushed, error-checked writes, set up a window
2305 * over the CR/CSR registers. We read from here to safely flush
2306 * through VME writes.
2307 */
2308 if (err_chk) {
2309 retval = tsi148_master_set(bridge->flush_image, 1,
2310 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2311 VME_D16);
2312 if (retval)
2313 dev_err(tsi148_bridge->parent, "Configuring flush image"
2314 " failed\n");
2315 }
2316
2317 return 0;
2319 }
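#if 0
/*
 * Illustrative sketch only - not part of the driver and kept out of the
 * build. It spells out the CR/CSR address arithmetic used above: each slot
 * owns a 512kB (0x80000) block of CR/CSR space and the register image
 * occupies the top 4kB of that block, which is why the flush window is
 * programmed at (slot * 0x80000) with a 0x80000 size.
 */
static unsigned int example_crcsr_block_base(unsigned int slot)
{
	return slot * 0x80000;			/* start of this board's block */
}

static unsigned int example_crcsr_register_base(unsigned int slot)
{
	return slot * 0x80000 + 0x7f000;	/* highest 4kB: CSR registers */
}
#endif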
2320
2321 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2322 struct pci_dev *pdev)
2323 {
2324 u32 crat;
2325 struct tsi148_driver *bridge;
2326
2327 bridge = tsi148_bridge->driver_priv;
2328
2329 /* Turn off CR/CSR space */
2330 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2331 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2332 bridge->base + TSI148_LCSR_CRAT);
2333
2334 /* Free image */
2335 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2336 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2337
2338 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2339 bridge->crcsr_bus);
2340 }
2341
2342 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2343 {
2344 int retval, i, master_num;
2345 u32 data;
2346 struct list_head *pos = NULL, *n;
2347 struct vme_bridge *tsi148_bridge;
2348 struct tsi148_driver *tsi148_device;
2349 struct vme_master_resource *master_image;
2350 struct vme_slave_resource *slave_image;
2351 struct vme_dma_resource *dma_ctrlr;
2352 struct vme_lm_resource *lm;
2353
2354 /* If we want to support more than one of each bridge, we need to
2355 * dynamically generate this so we get one per device
2356 */
2357 tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2358 if (tsi148_bridge == NULL) {
2359 dev_err(&pdev->dev, "Failed to allocate memory for device "
2360 "structure\n");
2361 retval = -ENOMEM;
2362 goto err_struct;
2363 }
2364
2365 tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2366 if (tsi148_device == NULL) {
2367 dev_err(&pdev->dev, "Failed to allocate memory for device "
2368 "structure\n");
2369 retval = -ENOMEM;
2370 goto err_driver;
2371 }
2372
2373 tsi148_bridge->driver_priv = tsi148_device;
2374
2375 /* Enable the device */
2376 retval = pci_enable_device(pdev);
2377 if (retval) {
2378 dev_err(&pdev->dev, "Unable to enable device\n");
2379 goto err_enable;
2380 }
2381
2382 /* Map Registers */
2383 retval = pci_request_regions(pdev, driver_name);
2384 if (retval) {
2385 dev_err(&pdev->dev, "Unable to reserve resources\n");
2386 goto err_resource;
2387 }
2388
2389 /* map registers in BAR 0 */
2390 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2391 4096);
2392 if (!tsi148_device->base) {
2393 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2394 retval = -EIO;
2395 goto err_remap;
2396 }
2397
2398 /* Check to see if the mapping worked out */
2399 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2400 if (data != PCI_VENDOR_ID_TUNDRA) {
2401 dev_err(&pdev->dev, "CRG region check failed\n");
2402 retval = -EIO;
2403 goto err_test;
2404 }
2405
2406 /* Initialize wait queues & mutual exclusion flags */
2407 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2408 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2409 init_waitqueue_head(&tsi148_device->iack_queue);
2410 mutex_init(&tsi148_device->vme_int);
2411 mutex_init(&tsi148_device->vme_rmw);
2412
2413 tsi148_bridge->parent = &pdev->dev;
2414 strcpy(tsi148_bridge->name, driver_name);
2415
2416 /* Setup IRQ */
2417 retval = tsi148_irq_init(tsi148_bridge);
2418 if (retval != 0) {
2419 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2420 goto err_irq;
2421 }
2422
2423 /* If we are going to flush writes, we need to read from the VME bus.
2424 * We need to do this safely, thus we read the device's own CR/CSR
2425 * register. To do this we must set up a window in CR/CSR space and
2426 * hence have one less master window resource available.
2427 */
2428 master_num = TSI148_MAX_MASTER;
2429 if (err_chk) {
2430 master_num--;
2431
2432 tsi148_device->flush_image =
2433 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2434 if (tsi148_device->flush_image == NULL) {
2435 dev_err(&pdev->dev, "Failed to allocate memory for "
2436 "flush resource structure\n");
2437 retval = -ENOMEM;
2438 goto err_master;
2439 }
2440 tsi148_device->flush_image->parent = tsi148_bridge;
2441 spin_lock_init(&tsi148_device->flush_image->lock);
2442 tsi148_device->flush_image->locked = 1;
2443 tsi148_device->flush_image->number = master_num;
2444 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2445 VME_A32 | VME_A64;
2446 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2447 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2448 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2449 VME_USER | VME_PROG | VME_DATA;
2450 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2451 memset(&tsi148_device->flush_image->bus_resource, 0,
2452 sizeof(struct resource));
2453 tsi148_device->flush_image->kern_base = NULL;
2454 }
2455
2456 /* Add master windows to list */
2457 INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2458 for (i = 0; i < master_num; i++) {
2459 master_image = kmalloc(sizeof(struct vme_master_resource),
2460 GFP_KERNEL);
2461 if (master_image == NULL) {
2462 dev_err(&pdev->dev, "Failed to allocate memory for "
2463 "master resource structure\n");
2464 retval = -ENOMEM;
2465 goto err_master;
2466 }
2467 master_image->parent = tsi148_bridge;
2468 spin_lock_init(&master_image->lock);
2469 master_image->locked = 0;
2470 master_image->number = i;
2471 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2472 VME_A64;
2473 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2474 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2475 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2476 VME_PROG | VME_DATA;
2477 master_image->width_attr = VME_D16 | VME_D32;
2478 memset(&master_image->bus_resource, 0,
2479 sizeof(struct resource));
2480 master_image->kern_base = NULL;
2481 list_add_tail(&master_image->list,
2482 &tsi148_bridge->master_resources);
2483 }
2484
2485 /* Add slave windows to list */
2486 INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2487 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2488 slave_image = kmalloc(sizeof(struct vme_slave_resource),
2489 GFP_KERNEL);
2490 if (slave_image == NULL) {
2491 dev_err(&pdev->dev, "Failed to allocate memory for "
2492 "slave resource structure\n");
2493 retval = -ENOMEM;
2494 goto err_slave;
2495 }
2496 slave_image->parent = tsi148_bridge;
2497 mutex_init(&slave_image->mtx);
2498 slave_image->locked = 0;
2499 slave_image->number = i;
2500 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2501 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2502 VME_USER3 | VME_USER4;
2503 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2504 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2505 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2506 VME_PROG | VME_DATA;
2507 list_add_tail(&slave_image->list,
2508 &tsi148_bridge->slave_resources);
2509 }
2510
2511 /* Add dma engines to list */
2512 INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2513 for (i = 0; i < TSI148_MAX_DMA; i++) {
2514 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2515 GFP_KERNEL);
2516 if (dma_ctrlr == NULL) {
2517 dev_err(&pdev->dev, "Failed to allocate memory for "
2518 "dma resource structure\n");
2519 retval = -ENOMEM;
2520 goto err_dma;
2521 }
2522 dma_ctrlr->parent = tsi148_bridge;
2523 mutex_init(&dma_ctrlr->mtx);
2524 dma_ctrlr->locked = 0;
2525 dma_ctrlr->number = i;
2526 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2527 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2528 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2529 VME_DMA_PATTERN_TO_MEM;
2530 INIT_LIST_HEAD(&dma_ctrlr->pending);
2531 INIT_LIST_HEAD(&dma_ctrlr->running);
2532 list_add_tail(&dma_ctrlr->list,
2533 &tsi148_bridge->dma_resources);
2534 }
2535
2536 /* Add location monitor to list */
2537 INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2538 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2539 if (lm == NULL) {
2540 dev_err(&pdev->dev, "Failed to allocate memory for "
2541 "location monitor resource structure\n");
2542 retval = -ENOMEM;
2543 goto err_lm;
2544 }
2545 lm->parent = tsi148_bridge;
2546 mutex_init(&lm->mtx);
2547 lm->locked = 0;
2548 lm->number = 1;
2549 lm->monitors = 4;
2550 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2551
2552 tsi148_bridge->slave_get = tsi148_slave_get;
2553 tsi148_bridge->slave_set = tsi148_slave_set;
2554 tsi148_bridge->master_get = tsi148_master_get;
2555 tsi148_bridge->master_set = tsi148_master_set;
2556 tsi148_bridge->master_read = tsi148_master_read;
2557 tsi148_bridge->master_write = tsi148_master_write;
2558 tsi148_bridge->master_rmw = tsi148_master_rmw;
2559 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2560 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2561 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2562 tsi148_bridge->irq_set = tsi148_irq_set;
2563 tsi148_bridge->irq_generate = tsi148_irq_generate;
2564 tsi148_bridge->lm_set = tsi148_lm_set;
2565 tsi148_bridge->lm_get = tsi148_lm_get;
2566 tsi148_bridge->lm_attach = tsi148_lm_attach;
2567 tsi148_bridge->lm_detach = tsi148_lm_detach;
2568 tsi148_bridge->slot_get = tsi148_slot_get;
2569 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2570 tsi148_bridge->free_consistent = tsi148_free_consistent;
2571
2572 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2573 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2574 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2575 if (!geoid)
2576 dev_info(&pdev->dev, "VME geographical address is %d\n",
2577 data & TSI148_LCSR_VSTAT_GA_M);
2578 else
2579 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2580 geoid);
2581
2582 dev_info(&pdev->dev, "VME write flush and error check is %s\n",
2583 err_chk ? "enabled" : "disabled");
2584
	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
	if (retval) {
2586 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2587 goto err_crcsr;
2588 }
2589
2590 retval = vme_register_bridge(tsi148_bridge);
2591 if (retval != 0) {
2592 dev_err(&pdev->dev, "Chip Registration failed.\n");
2593 goto err_reg;
2594 }
2595
2596 pci_set_drvdata(pdev, tsi148_bridge);
2597
2598 /* Clear VME bus "board fail" and "power-up reset" lines */
2599 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2600 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2601 data |= TSI148_LCSR_VSTAT_CPURST;
2602 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2603
2604 return 0;
2605
2606 err_reg:
2607 tsi148_crcsr_exit(tsi148_bridge, pdev);
2608 err_crcsr:
2609 err_lm:
2610 /* resources are stored in a linked list */
2611 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2612 lm = list_entry(pos, struct vme_lm_resource, list);
2613 list_del(pos);
2614 kfree(lm);
2615 }
2616 err_dma:
2617 /* resources are stored in a linked list */
2618 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2619 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2620 list_del(pos);
2621 kfree(dma_ctrlr);
2622 }
2623 err_slave:
2624 /* resources are stored in a linked list */
2625 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2626 slave_image = list_entry(pos, struct vme_slave_resource, list);
2627 list_del(pos);
2628 kfree(slave_image);
2629 }
2630 err_master:
2631 /* resources are stored in a linked list */
2632 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2633 master_image = list_entry(pos, struct vme_master_resource,
2634 list);
2635 list_del(pos);
2636 kfree(master_image);
2637 }
2638
2639 tsi148_irq_exit(tsi148_bridge, pdev);
2640 err_irq:
2641 err_test:
2642 iounmap(tsi148_device->base);
2643 err_remap:
2644 pci_release_regions(pdev);
2645 err_resource:
2646 pci_disable_device(pdev);
2647 err_enable:
2648 kfree(tsi148_device);
2649 err_driver:
2650 kfree(tsi148_bridge);
2651 err_struct:
2652 return retval;
2654 }
2655
2656 static void tsi148_remove(struct pci_dev *pdev)
2657 {
2658 struct list_head *pos = NULL;
2659 struct list_head *tmplist;
2660 struct vme_master_resource *master_image;
2661 struct vme_slave_resource *slave_image;
2662 struct vme_dma_resource *dma_ctrlr;
2663 int i;
2664 struct tsi148_driver *bridge;
2665 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2666
2667 bridge = tsi148_bridge->driver_priv;
2668
2670 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2671
2672 /*
2673 * Shutdown all inbound and outbound windows.
2674 */
2675 for (i = 0; i < 8; i++) {
2676 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2677 TSI148_LCSR_OFFSET_ITAT);
2678 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2679 TSI148_LCSR_OFFSET_OTAT);
2680 }
2681
2682 /*
2683 * Shutdown Location monitor.
2684 */
2685 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2686
2687 /*
2688 * Shutdown CRG map.
2689 */
2690 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2691
2692 /*
2693 * Clear error status.
2694 */
2695 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2696 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2697 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2698
2699 /*
2700 * Remove VIRQ interrupt (if any)
2701 */
2702 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2703 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2704
2705 /*
2706 * Map all Interrupts to PCI INTA
2707 */
2708 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2709 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2710
2711 tsi148_irq_exit(tsi148_bridge, pdev);
2712
2713 vme_unregister_bridge(tsi148_bridge);
2714
2715 tsi148_crcsr_exit(tsi148_bridge, pdev);
2716
2717 /* resources are stored in a linked list */
2718 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2719 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2720 list_del(pos);
2721 kfree(dma_ctrlr);
2722 }
2723
2724 /* resources are stored in a linked list */
2725 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2726 slave_image = list_entry(pos, struct vme_slave_resource, list);
2727 list_del(pos);
2728 kfree(slave_image);
2729 }
2730
2731 /* resources are stored in a linked list */
2732 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2733 master_image = list_entry(pos, struct vme_master_resource,
2734 list);
2735 list_del(pos);
2736 kfree(master_image);
2737 }
2738
2739 iounmap(bridge->base);
2740
2741 pci_release_regions(pdev);
2742
2743 pci_disable_device(pdev);
2744
2745 kfree(tsi148_bridge->driver_priv);
2746
2747 kfree(tsi148_bridge);
2748 }
2749
2750 module_pci_driver(tsi148_driver);
2751
2752 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2753 module_param(err_chk, bool, 0);
2754
2755 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2756 module_param(geoid, int, 0);
2757
2758 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2759 MODULE_LICENSE("GPL");
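/*
 * Example (illustrative) module parameter usage:
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=3
 *
 * err_chk trades throughput for flushed, error-checked VME writes via a
 * dedicated CR/CSR flush window; geoid overrides the slot number otherwise
 * read from the backplane's geographical address lines.
 */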