1/*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/mm.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/proc_fs.h>
22#include <linux/pci.h>
23#include <linux/poll.h>
24#include <linux/dma-mapping.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
6af783c8 27#include <linux/sched.h>
5a0e3ad6 28#include <linux/slab.h>
29#include <asm/time.h>
30#include <asm/io.h>
31#include <asm/uaccess.h>
32
33#include "../vme.h"
34#include "../vme_bridge.h"
35#include "vme_tsi148.h"
36
37static int __init tsi148_init(void);
38static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
39static void tsi148_remove(struct pci_dev *);
40static void __exit tsi148_exit(void);
41
42
43int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
44 unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
45int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
46 unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
47
48int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
49 unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
50int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
51 unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
52ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
53 loff_t);
54ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
55 loff_t);
56unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
57 unsigned int, unsigned int, loff_t);
58int tsi148_dma_list_add (struct vme_dma_list *, struct vme_dma_attr *,
59 struct vme_dma_attr *, size_t);
60int tsi148_dma_list_exec(struct vme_dma_list *);
61int tsi148_dma_list_empty(struct vme_dma_list *);
62int tsi148_generate_irq(int, int);
d22b8ed9 63
 64/* Module parameters */
42d4eff7 65static int err_chk;
638f199d 66static int geoid;
d22b8ed9 67
68static char driver_name[] = "vme_tsi148";
69
13ac58da 70static const struct pci_device_id tsi148_ids[] = {
71 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
72 { },
73};
74
75static struct pci_driver tsi148_driver = {
76 .name = driver_name,
77 .id_table = tsi148_ids,
78 .probe = tsi148_probe,
79 .remove = tsi148_remove,
80};
81
82static void reg_join(unsigned int high, unsigned int low,
83 unsigned long long *variable)
84{
85 *variable = (unsigned long long)high << 32;
86 *variable |= (unsigned long long)low;
87}
88
89static void reg_split(unsigned long long variable, unsigned int *high,
90 unsigned int *low)
91{
92 *low = (unsigned int)variable & 0xFFFFFFFF;
93 *high = (unsigned int)(variable >> 32);
94}
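/*
 * Illustrative sketch (not part of the driver): the TSI148 presents 64-bit
 * quantities as pairs of 32-bit registers, so values are split before they
 * are written out and re-joined after they are read back:
 *
 *	unsigned int hi, lo;
 *	unsigned long long addr = 0x100001000ULL, check;
 *
 *	reg_split(addr, &hi, &lo);	so hi == 0x1 and lo == 0x00001000
 *	reg_join(hi, lo, &check);	so check == addr again
 */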
95
96/*
97 * Wakes up DMA queue.
98 */
99static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
100 int channel_mask)
101{
102 u32 serviced = 0;
103
104 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
29848ac9 105 wake_up(&(bridge->dma_queue[0]));
106 serviced |= TSI148_LCSR_INTC_DMA0C;
107 }
108 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
29848ac9 109 wake_up(&(bridge->dma_queue[1]));
110 serviced |= TSI148_LCSR_INTC_DMA1C;
111 }
112
113 return serviced;
114}
115
116/*
117 * Wake up location monitor queue
118 */
29848ac9 119static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
120{
121 int i;
122 u32 serviced = 0;
123
124 for (i = 0; i < 4; i++) {
 125 if (stat & TSI148_LCSR_INTS_LMS[i]) {
126 /* We only enable interrupts if the callback is set */
29848ac9 127 bridge->lm_callback[i](i);
128 serviced |= TSI148_LCSR_INTC_LMC[i];
129 }
130 }
131
132 return serviced;
133}
134
135/*
136 * Wake up mail box queue.
137 *
 138 * XXX This functionality is not exposed through the API.
139 */
29848ac9 140static u32 tsi148_MB_irqhandler(struct tsi148_driver *bridge, u32 stat)
141{
142 int i;
143 u32 val;
144 u32 serviced = 0;
145
146 for (i = 0; i < 4; i++) {
 147 if (stat & TSI148_LCSR_INTS_MBS[i]) {
29848ac9 148 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
 149 printk(KERN_INFO "VME Mailbox %d received: 0x%x\n", i, val);
150 serviced |= TSI148_LCSR_INTC_MBC[i];
151 }
152 }
153
154 return serviced;
155}
156
157/*
158 * Display error & status message when PERR (PCI) exception interrupt occurs.
159 */
29848ac9 160static u32 tsi148_PERR_irqhandler(struct tsi148_driver *bridge)
161{
162 printk(KERN_ERR
163 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
164 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
165 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
166 ioread32be(bridge->base + TSI148_LCSR_EDPAT)
167 );
168 printk(KERN_ERR
169 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
170 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
171 ioread32be(bridge->base + TSI148_LCSR_EDPXS)
172 );
173
29848ac9 174 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
175
176 return TSI148_LCSR_INTC_PERRC;
177}
178
179/*
180 * Save address and status when VME error interrupt occurs.
181 */
29848ac9 182static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
183{
184 unsigned int error_addr_high, error_addr_low;
185 unsigned long long error_addr;
186 u32 error_attrib;
187 struct vme_bus_error *error;
188 struct tsi148_driver *bridge;
189
190 bridge = tsi148_bridge->driver_priv;
d22b8ed9 191
192 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
193 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
194 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
195
196 reg_join(error_addr_high, error_addr_low, &error_addr);
197
198 /* Check for exception register overflow (we have lost error data) */
 199 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
200 printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
201 }
202
 203 error = kmalloc(sizeof(struct vme_bus_error),
 204 GFP_ATOMIC);
205 if (error) {
206 error->address = error_addr;
207 error->attributes = error_attrib;
208 list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
209 } else {
210 printk(KERN_ERR
211 "Unable to alloc memory for VMEbus Error reporting\n");
212 printk(KERN_ERR
213 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
214 error_addr, error_attrib);
215 }
216
217 /* Clear Status */
29848ac9 218 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
219
220 return TSI148_LCSR_INTC_VERRC;
221}
222
223/*
224 * Wake up IACK queue.
225 */
29848ac9 226static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
d22b8ed9 227{
29848ac9 228 wake_up(&(bridge->iack_queue));
229
230 return TSI148_LCSR_INTC_IACKC;
231}
232
233/*
234 * Calling VME bus interrupt callback if provided.
235 */
236static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
237 u32 stat)
238{
239 int vec, i, serviced = 0;
240 struct tsi148_driver *bridge;
241
242 bridge = tsi148_bridge->driver_priv;
243
244 for (i = 7; i > 0; i--) {
245 if (stat & (1 << i)) {
246 /*
247 * Note: Even though the registers are defined
248 * as 32-bits in the spec, we only want to issue
249 * 8-bit IACK cycles on the bus, read from offset
250 * 3.
251 */
29848ac9 252 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
d22b8ed9 253
c813f592 254 vme_irq_handler(tsi148_bridge, i, vec);
255
256 serviced |= (1 << i);
257 }
258 }
259
260 return serviced;
261}
262
263/*
264 * Top level interrupt handler. Clears appropriate interrupt status bits and
265 * then calls appropriate sub handler(s).
266 */
29848ac9 267static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
268{
269 u32 stat, enable, serviced = 0;
270 struct vme_bridge *tsi148_bridge;
271 struct tsi148_driver *bridge;
272
273 tsi148_bridge = ptr;
274
275 bridge = tsi148_bridge->driver_priv;
276
277 /* Determine which interrupts are unmasked and set */
278 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
279 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
280
281 /* Only look at unmasked interrupts */
282 stat &= enable;
283
284 if (unlikely(!stat)) {
285 return IRQ_NONE;
286 }
287
288 /* Call subhandlers as appropriate */
289 /* DMA irqs */
290 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
29848ac9 291 serviced |= tsi148_DMA_irqhandler(bridge, stat);
292
293 /* Location monitor irqs */
294 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
295 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
29848ac9 296 serviced |= tsi148_LM_irqhandler(bridge, stat);
297
298 /* Mail box irqs */
299 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
300 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
29848ac9 301 serviced |= tsi148_MB_irqhandler(bridge, stat);
302
303 /* PCI bus error */
304 if (stat & TSI148_LCSR_INTS_PERRS)
29848ac9 305 serviced |= tsi148_PERR_irqhandler(bridge);
306
307 /* VME bus error */
308 if (stat & TSI148_LCSR_INTS_VERRS)
29848ac9 309 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
310
311 /* IACK irq */
312 if (stat & TSI148_LCSR_INTS_IACKS)
29848ac9 313 serviced |= tsi148_IACK_irqhandler(bridge);
314
315 /* VME bus irqs */
316 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
317 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
318 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
319 TSI148_LCSR_INTS_IRQ1S))
29848ac9 320 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
321
322 /* Clear serviced interrupts */
29848ac9 323 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
324
325 return IRQ_HANDLED;
326}
327
29848ac9 328static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
329{
330 int result;
331 unsigned int tmp;
332 struct pci_dev *pdev;
333 struct tsi148_driver *bridge;
334
335 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
d22b8ed9 336
29848ac9 337 bridge = tsi148_bridge->driver_priv;
338
339 /* Initialise list for VME bus errors */
29848ac9 340 INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
d22b8ed9 341
29848ac9 342 mutex_init(&(tsi148_bridge->irq_mtx));
c813f592 343
344 result = request_irq(pdev->irq,
345 tsi148_irqhandler,
346 IRQF_SHARED,
29848ac9 347 driver_name, tsi148_bridge);
348 if (result) {
349 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
350 pdev->irq);
351 return result;
352 }
353
354 /* Enable and unmask interrupts */
355 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
356 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
357 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
358 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
359 TSI148_LCSR_INTEO_IACKEO;
360
29848ac9 361 /* This leaves the following interrupts masked.
362 * TSI148_LCSR_INTEO_VIEEO
363 * TSI148_LCSR_INTEO_SYSFLEO
364 * TSI148_LCSR_INTEO_ACFLEO
365 */
366
367 /* Don't enable Location Monitor interrupts here - they will be
368 * enabled when the location monitors are properly configured and
369 * a callback has been attached.
370 * TSI148_LCSR_INTEO_LM0EO
371 * TSI148_LCSR_INTEO_LM1EO
372 * TSI148_LCSR_INTEO_LM2EO
373 * TSI148_LCSR_INTEO_LM3EO
374 */
375
376 /* Don't enable VME interrupts until we add a handler, else the board
377 * will respond to it and we don't want that unless it knows how to
378 * properly deal with it.
379 * TSI148_LCSR_INTEO_IRQ7EO
380 * TSI148_LCSR_INTEO_IRQ6EO
381 * TSI148_LCSR_INTEO_IRQ5EO
382 * TSI148_LCSR_INTEO_IRQ4EO
383 * TSI148_LCSR_INTEO_IRQ3EO
384 * TSI148_LCSR_INTEO_IRQ2EO
385 * TSI148_LCSR_INTEO_IRQ1EO
386 */
387
388 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
389 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
390
391 return 0;
392}
393
29848ac9 394static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
395{
396 /* Turn off interrupts */
397 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
398 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
399
400 /* Clear all interrupts */
29848ac9 401 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
402
403 /* Detach interrupt handler */
404 free_irq(pdev->irq, pdev);
405}
406
407/*
 408 * Check to see if an IACK has been received, return true (1) or false (0).
409 */
29848ac9 410int tsi148_iack_received(struct tsi148_driver *bridge)
411{
412 u32 tmp;
413
29848ac9 414 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
415
416 if (tmp & TSI148_LCSR_VICR_IRQS)
417 return 0;
418 else
419 return 1;
420}
421
422/*
c813f592 423 * Configure VME interrupt
d22b8ed9 424 */
425void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
426 int state, int sync)
d22b8ed9 427{
75155020 428 struct pci_dev *pdev;
c813f592 429 u32 tmp;
430 struct tsi148_driver *bridge;
431
432 bridge = tsi148_bridge->driver_priv;
d22b8ed9 433
434 /* We need to do the ordering differently for enabling and disabling */
435 if (state == 0) {
29848ac9 436 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
d22b8ed9 437 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
29848ac9 438 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
df455175 439
29848ac9 440 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
df455175 441 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
29848ac9 442 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
75155020 443
444 if (sync != 0) {
445 pdev = container_of(tsi148_bridge->parent,
446 struct pci_dev, dev);
75155020 447
448 synchronize_irq(pdev->irq);
449 }
450 } else {
29848ac9 451 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
c813f592 452 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
29848ac9 453 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
df455175 454
29848ac9 455 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
c813f592 456 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
29848ac9 457 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
c813f592 458 }
459}
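/*
 * Usage sketch (illustrative only, the level is hypothetical): a caller such
 * as the VME core disables level 5 and waits for any in-flight handler to
 * finish with:
 *
 *	tsi148_irq_set(tsi148_bridge, 5, 0, 1);
 *
 * and re-enables it, with no need to synchronise, using:
 *
 *	tsi148_irq_set(tsi148_bridge, 5, 1, 0);
 */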
460
461/*
462 * Generate a VME bus interrupt at the requested level & vector. Wait for
463 * interrupt to be acked.
d22b8ed9 464 */
29848ac9 465int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
466{
467 u32 tmp;
468 struct tsi148_driver *bridge;
469
470 bridge = tsi148_bridge->driver_priv;
d22b8ed9 471
29848ac9 472 mutex_lock(&(bridge->vme_int));
473
474 /* Read VICR register */
29848ac9 475 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
476
477 /* Set Status/ID */
478 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
479 (statid & TSI148_LCSR_VICR_STID_M);
29848ac9 480 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
481
482 /* Assert VMEbus IRQ */
483 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
29848ac9 484 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
485
486 /* XXX Consider implementing a timeout? */
487 wait_event_interruptible(bridge->iack_queue,
488 tsi148_iack_received(bridge));
d22b8ed9 489
29848ac9 490 mutex_unlock(&(bridge->vme_int));
491
492 return 0;
493}
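/*
 * Usage sketch (illustrative only, level and status/ID are hypothetical):
 * raise a level 3 interrupt on the VME backplane with status/ID 0xaa and
 * block until another master acknowledges it; the wake-up comes from
 * tsi148_IACK_irqhandler() via iack_queue:
 *
 *	tsi148_irq_generate(tsi148_bridge, 3, 0xaa);
 */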
494
495/*
496 * Find the first error in this address range
497 */
498static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
499 vme_address_t aspace, unsigned long long address, size_t count)
500{
501 struct list_head *err_pos;
502 struct vme_bus_error *vme_err, *valid = NULL;
503 unsigned long long bound;
504
505 bound = address + count;
506
507 /*
508 * XXX We are currently not looking at the address space when parsing
509 * for errors. This is because parsing the Address Modifier Codes
510 * is going to be quite resource intensive to do properly. We
511 * should be OK just looking at the addresses and this is certainly
512 * much better than what we had before.
513 */
514 err_pos = NULL;
515 /* Iterate through errors */
516 list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
517 vme_err = list_entry(err_pos, struct vme_bus_error, list);
 518 if ((vme_err->address >= address) && (vme_err->address < bound)) {
519 valid = vme_err;
520 break;
521 }
522 }
523
524 return valid;
525}
526
527/*
528 * Clear errors in the provided address range.
529 */
530static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
531 vme_address_t aspace, unsigned long long address, size_t count)
532{
533 struct list_head *err_pos, *temp;
534 struct vme_bus_error *vme_err;
535 unsigned long long bound;
536
537 bound = address + count;
538
539 /*
540 * XXX We are currently not looking at the address space when parsing
541 * for errors. This is because parsing the Address Modifier Codes
542 * is going to be quite resource intensive to do properly. We
543 * should be OK just looking at the addresses and this is certainly
544 * much better than what we had before.
545 */
546 err_pos = NULL;
547 /* Iterate through errors */
548 list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
549 vme_err = list_entry(err_pos, struct vme_bus_error, list);
550
 551 if ((vme_err->address >= address) && (vme_err->address < bound)) {
552 list_del(err_pos);
553 kfree(vme_err);
554 }
555 }
556}
557
558/*
559 * Initialize a slave window with the requested attributes.
560 */
561int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
562 unsigned long long vme_base, unsigned long long size,
563 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
564{
565 unsigned int i, addr = 0, granularity = 0;
566 unsigned int temp_ctl = 0;
567 unsigned int vme_base_low, vme_base_high;
568 unsigned int vme_bound_low, vme_bound_high;
569 unsigned int pci_offset_low, pci_offset_high;
570 unsigned long long vme_bound, pci_offset;
571 struct tsi148_driver *bridge;
572
573 bridge = image->parent->driver_priv;
d22b8ed9 574
575 i = image->number;
576
577 switch (aspace) {
578 case VME_A16:
579 granularity = 0x10;
580 addr |= TSI148_LCSR_ITAT_AS_A16;
581 break;
582 case VME_A24:
583 granularity = 0x1000;
584 addr |= TSI148_LCSR_ITAT_AS_A24;
585 break;
586 case VME_A32:
587 granularity = 0x10000;
588 addr |= TSI148_LCSR_ITAT_AS_A32;
589 break;
590 case VME_A64:
591 granularity = 0x10000;
592 addr |= TSI148_LCSR_ITAT_AS_A64;
593 break;
594 case VME_CRCSR:
595 case VME_USER1:
596 case VME_USER2:
597 case VME_USER3:
598 case VME_USER4:
599 default:
 600 printk(KERN_ERR "Invalid address space\n");
601 return -EINVAL;
602 break;
603 }
604
605 /* Convert 64-bit variables to 2x 32-bit variables */
606 reg_split(vme_base, &vme_base_high, &vme_base_low);
607
608 /*
609 * Bound address is a valid address for the window, adjust
610 * accordingly
611 */
612 vme_bound = vme_base + size - granularity;
613 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
614 pci_offset = (unsigned long long)pci_base - vme_base;
615 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
616
617 if (vme_base_low & (granularity - 1)) {
 618 printk(KERN_ERR "Invalid VME base alignment\n");
619 return -EINVAL;
620 }
621 if (vme_bound_low & (granularity - 1)) {
 622 printk(KERN_ERR "Invalid VME bound alignment\n");
623 return -EINVAL;
624 }
625 if (pci_offset_low & (granularity - 1)) {
 626 printk(KERN_ERR "Invalid PCI Offset alignment\n");
627 return -EINVAL;
628 }
629
d22b8ed9 630 /* Disable while we are mucking around */
29848ac9 631 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
632 TSI148_LCSR_OFFSET_ITAT);
633 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
29848ac9 634 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
635 TSI148_LCSR_OFFSET_ITAT);
636
637 /* Setup mapping */
29848ac9 638 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 639 TSI148_LCSR_OFFSET_ITSAU);
29848ac9 640 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 641 TSI148_LCSR_OFFSET_ITSAL);
29848ac9 642 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 643 TSI148_LCSR_OFFSET_ITEAU);
29848ac9 644 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 645 TSI148_LCSR_OFFSET_ITEAL);
29848ac9 646 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 647 TSI148_LCSR_OFFSET_ITOFU);
29848ac9 648 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
649 TSI148_LCSR_OFFSET_ITOFL);
650
651 /* Setup 2eSST speeds */
652 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
653 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
654 case VME_2eSST160:
655 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
656 break;
657 case VME_2eSST267:
658 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
659 break;
660 case VME_2eSST320:
661 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
662 break;
663 }
664
665 /* Setup cycle types */
666 temp_ctl &= ~(0x1F << 7);
667 if (cycle & VME_BLT)
668 temp_ctl |= TSI148_LCSR_ITAT_BLT;
669 if (cycle & VME_MBLT)
670 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
671 if (cycle & VME_2eVME)
672 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
673 if (cycle & VME_2eSST)
674 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
675 if (cycle & VME_2eSSTB)
676 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
677
678 /* Setup address space */
679 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
680 temp_ctl |= addr;
681
682 temp_ctl &= ~0xF;
683 if (cycle & VME_SUPER)
 684 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
685 if (cycle & VME_USER)
686 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
687 if (cycle & VME_PROG)
688 temp_ctl |= TSI148_LCSR_ITAT_PGM;
689 if (cycle & VME_DATA)
690 temp_ctl |= TSI148_LCSR_ITAT_DATA;
691
692 /* Write ctl reg without enable */
29848ac9 693 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
694 TSI148_LCSR_OFFSET_ITAT);
695
696 if (enabled)
697 temp_ctl |= TSI148_LCSR_ITAT_EN;
698
29848ac9 699 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
700 TSI148_LCSR_OFFSET_ITAT);
701
702 return 0;
703}
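/*
 * Usage sketch (illustrative only, all parameters are hypothetical): expose
 * 64KB of PCI memory at bus address pci_buf on the VME bus at A24 address
 * 0x400000 for single-cycle data accesses, assuming pci_buf satisfies the
 * A24 granularity checks above:
 *
 *	tsi148_slave_set(image, 1, 0x400000, 0x10000, pci_buf,
 *		VME_A24, VME_SCT | VME_SUPER | VME_USER | VME_DATA);
 */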
704
705/*
706 * Get slave window configuration.
707 */
708int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
709 unsigned long long *vme_base, unsigned long long *size,
710 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
711{
712 unsigned int i, granularity = 0, ctl = 0;
713 unsigned int vme_base_low, vme_base_high;
714 unsigned int vme_bound_low, vme_bound_high;
715 unsigned int pci_offset_low, pci_offset_high;
716 unsigned long long vme_bound, pci_offset;
29848ac9 717 struct tsi148_driver *bridge;
d22b8ed9 718
29848ac9 719 bridge = image->parent->driver_priv;
720
721 i = image->number;
722
723 /* Read registers */
29848ac9 724 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
725 TSI148_LCSR_OFFSET_ITAT);
726
29848ac9 727 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 728 TSI148_LCSR_OFFSET_ITSAU);
29848ac9 729 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 730 TSI148_LCSR_OFFSET_ITSAL);
29848ac9 731 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 732 TSI148_LCSR_OFFSET_ITEAU);
29848ac9 733 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 734 TSI148_LCSR_OFFSET_ITEAL);
29848ac9 735 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 736 TSI148_LCSR_OFFSET_ITOFU);
29848ac9 737 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
738 TSI148_LCSR_OFFSET_ITOFL);
739
 740 /* Convert 2x 32-bit variables to a 64-bit variable */
741 reg_join(vme_base_high, vme_base_low, vme_base);
742 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
743 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
744
 745 *pci_base = (dma_addr_t)*vme_base + pci_offset;
746
747 *enabled = 0;
748 *aspace = 0;
749 *cycle = 0;
750
751 if (ctl & TSI148_LCSR_ITAT_EN)
752 *enabled = 1;
753
754 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
755 granularity = 0x10;
756 *aspace |= VME_A16;
757 }
758 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
759 granularity = 0x1000;
760 *aspace |= VME_A24;
761 }
762 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
763 granularity = 0x10000;
764 *aspace |= VME_A32;
765 }
766 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
767 granularity = 0x10000;
768 *aspace |= VME_A64;
769 }
770
771 /* Need granularity before we set the size */
772 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
773
774
775 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
776 *cycle |= VME_2eSST160;
777 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
778 *cycle |= VME_2eSST267;
779 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
780 *cycle |= VME_2eSST320;
781
782 if (ctl & TSI148_LCSR_ITAT_BLT)
783 *cycle |= VME_BLT;
784 if (ctl & TSI148_LCSR_ITAT_MBLT)
785 *cycle |= VME_MBLT;
786 if (ctl & TSI148_LCSR_ITAT_2eVME)
787 *cycle |= VME_2eVME;
788 if (ctl & TSI148_LCSR_ITAT_2eSST)
789 *cycle |= VME_2eSST;
790 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
791 *cycle |= VME_2eSSTB;
792
793 if (ctl & TSI148_LCSR_ITAT_SUPR)
794 *cycle |= VME_SUPER;
795 if (ctl & TSI148_LCSR_ITAT_NPRIV)
796 *cycle |= VME_USER;
797 if (ctl & TSI148_LCSR_ITAT_PGM)
798 *cycle |= VME_PROG;
799 if (ctl & TSI148_LCSR_ITAT_DATA)
800 *cycle |= VME_DATA;
801
802 return 0;
803}
804
805/*
806 * Allocate and map PCI Resource
807 */
808static int tsi148_alloc_resource(struct vme_master_resource *image,
809 unsigned long long size)
810{
811 unsigned long long existing_size;
812 int retval = 0;
813 struct pci_dev *pdev;
814 struct vme_bridge *tsi148_bridge;
815
816 tsi148_bridge = image->parent;
817
818 /* Find pci_dev container of dev */
819 if (tsi148_bridge->parent == NULL) {
 820 printk(KERN_ERR "Dev entry NULL\n");
821 return -EINVAL;
822 }
823 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
824
825 existing_size = (unsigned long long)(image->bus_resource.end -
826 image->bus_resource.start);
827
828 /* If the existing size is OK, return */
59c22904 829 if ((size != 0) && (existing_size == (size - 1)))
830 return 0;
831
832 if (existing_size != 0) {
833 iounmap(image->kern_base);
834 image->kern_base = NULL;
835 if (image->bus_resource.name != NULL)
836 kfree(image->bus_resource.name);
837 release_resource(&(image->bus_resource));
838 memset(&(image->bus_resource), 0, sizeof(struct resource));
839 }
840
841 /* Exit here if size is zero */
842 if (size == 0) {
843 return 0;
844 }
845
846 if (image->bus_resource.name == NULL) {
847 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
848 if (image->bus_resource.name == NULL) {
849 printk(KERN_ERR "Unable to allocate memory for resource"
850 " name\n");
851 retval = -ENOMEM;
852 goto err_name;
853 }
854 }
855
8fafb476 856 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
857 image->number);
858
859 image->bus_resource.start = 0;
860 image->bus_resource.end = (unsigned long)size;
861 image->bus_resource.flags = IORESOURCE_MEM;
862
863 retval = pci_bus_alloc_resource(pdev->bus,
8fafb476 864 &(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
865 0, NULL, NULL);
866 if (retval) {
867 printk(KERN_ERR "Failed to allocate mem resource for "
868 "window %d size 0x%lx start 0x%lx\n",
869 image->number, (unsigned long)size,
8fafb476 870 (unsigned long)image->bus_resource.start);
871 goto err_resource;
872 }
873
874 image->kern_base = ioremap_nocache(
8fafb476 875 image->bus_resource.start, size);
876 if (image->kern_base == NULL) {
877 printk(KERN_ERR "Failed to remap resource\n");
878 retval = -ENOMEM;
879 goto err_remap;
880 }
881
882 return 0;
883
884 iounmap(image->kern_base);
885 image->kern_base = NULL;
886err_remap:
8fafb476 887 release_resource(&(image->bus_resource));
d22b8ed9 888err_resource:
889 kfree(image->bus_resource.name);
890 memset(&(image->bus_resource), 0, sizeof(struct resource));
891err_name:
892 return retval;
893}
894
895/*
896 * Free and unmap PCI Resource
897 */
898static void tsi148_free_resource(struct vme_master_resource *image)
899{
900 iounmap(image->kern_base);
901 image->kern_base = NULL;
902 release_resource(&(image->bus_resource));
903 kfree(image->bus_resource.name);
904 memset(&(image->bus_resource), 0, sizeof(struct resource));
905}
906
907/*
908 * Set the attributes of an outbound window.
909 */
910int tsi148_master_set( struct vme_master_resource *image, int enabled,
911 unsigned long long vme_base, unsigned long long size,
912 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
913{
914 int retval = 0;
915 unsigned int i;
916 unsigned int temp_ctl = 0;
917 unsigned int pci_base_low, pci_base_high;
918 unsigned int pci_bound_low, pci_bound_high;
919 unsigned int vme_offset_low, vme_offset_high;
920 unsigned long long pci_bound, vme_offset, pci_base;
921 struct tsi148_driver *bridge;
922
923 bridge = image->parent->driver_priv;
924
925 /* Verify input data */
926 if (vme_base & 0xFFFF) {
59c22904 927 printk(KERN_ERR "Invalid VME Window alignment\n");
928 retval = -EINVAL;
929 goto err_window;
930 }
931
932 if ((size == 0) && (enabled != 0)) {
933 printk(KERN_ERR "Size must be non-zero for enabled windows\n");
934 retval = -EINVAL;
935 goto err_window;
936 }
937
938 spin_lock(&(image->lock));
939
940 /* Let's allocate the resource here rather than further up the stack as
 941 * it avoids pushing loads of bus-dependent stuff up the stack. If size
942 * is zero, any existing resource will be freed.
943 */
944 retval = tsi148_alloc_resource(image, size);
945 if (retval) {
946 spin_unlock(&(image->lock));
947 printk(KERN_ERR "Unable to allocate memory for "
948 "resource\n");
949 goto err_res;
950 }
951
952 if (size == 0) {
953 pci_base = 0;
954 pci_bound = 0;
955 vme_offset = 0;
956 } else {
8fafb476 957 pci_base = (unsigned long long)image->bus_resource.start;
958
959 /*
960 * Bound address is a valid address for the window, adjust
961 * according to window granularity.
962 */
963 pci_bound = pci_base + (size - 0x10000);
964 vme_offset = vme_base - pci_base;
965 }
966
967 /* Convert 64-bit variables to 2x 32-bit variables */
968 reg_split(pci_base, &pci_base_high, &pci_base_low);
969 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
970 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
971
972 if (pci_base_low & 0xFFFF) {
973 spin_unlock(&(image->lock));
59c22904 974 printk(KERN_ERR "Invalid PCI base alignment\n");
975 retval = -EINVAL;
976 goto err_gran;
977 }
978 if (pci_bound_low & 0xFFFF) {
979 spin_unlock(&(image->lock));
59c22904 980 printk(KERN_ERR "Invalid PCI bound alignment\n");
981 retval = -EINVAL;
982 goto err_gran;
983 }
984 if (vme_offset_low & 0xFFFF) {
985 spin_unlock(&(image->lock));
59c22904 986 printk(KERN_ERR "Invalid VME Offset alignment\n");
987 retval = -EINVAL;
988 goto err_gran;
989 }
990
991 i = image->number;
992
993 /* Disable while we are mucking around */
29848ac9 994 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
995 TSI148_LCSR_OFFSET_OTAT);
996 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
29848ac9 997 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
998 TSI148_LCSR_OFFSET_OTAT);
999
1000 /* Setup 2eSST speeds */
1001 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
1002 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1003 case VME_2eSST160:
1004 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1005 break;
1006 case VME_2eSST267:
1007 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1008 break;
1009 case VME_2eSST320:
1010 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1011 break;
1012 }
1013
1014 /* Setup cycle types */
1015 if (cycle & VME_BLT) {
1016 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1017 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1018 }
1019 if (cycle & VME_MBLT) {
1020 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1021 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1022 }
1023 if (cycle & VME_2eVME) {
1024 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1025 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1026 }
1027 if (cycle & VME_2eSST) {
1028 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1029 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1030 }
1031 if (cycle & VME_2eSSTB) {
1032 printk(KERN_WARNING "Currently not setting Broadcast Select "
1033 "Registers\n");
1034 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1035 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1036 }
1037
1038 /* Setup data width */
1039 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1040 switch (dwidth) {
1041 case VME_D16:
1042 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1043 break;
1044 case VME_D32:
1045 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1046 break;
1047 default:
1048 spin_unlock(&(image->lock));
59c22904 1049 printk(KERN_ERR "Invalid data width\n");
1050 retval = -EINVAL;
1051 goto err_dwidth;
1052 }
1053
1054 /* Setup address space */
1055 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1056 switch (aspace) {
1057 case VME_A16:
1058 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1059 break;
1060 case VME_A24:
1061 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1062 break;
1063 case VME_A32:
1064 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1065 break;
1066 case VME_A64:
1067 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1068 break;
1069 case VME_CRCSR:
1070 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1071 break;
1072 case VME_USER1:
1073 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1074 break;
1075 case VME_USER2:
1076 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1077 break;
1078 case VME_USER3:
1079 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1080 break;
1081 case VME_USER4:
1082 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1083 break;
1084 default:
1085 spin_unlock(&(image->lock));
59c22904 1086 printk(KERN_ERR "Invalid address space\n");
1087 retval = -EINVAL;
1088 goto err_aspace;
1089 break;
1090 }
1091
1092 temp_ctl &= ~(3<<4);
1093 if (cycle & VME_SUPER)
1094 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1095 if (cycle & VME_PROG)
1096 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1097
1098 /* Setup mapping */
29848ac9 1099 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1100 TSI148_LCSR_OFFSET_OTSAU);
29848ac9 1101 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1102 TSI148_LCSR_OFFSET_OTSAL);
29848ac9 1103 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1104 TSI148_LCSR_OFFSET_OTEAU);
29848ac9 1105 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1106 TSI148_LCSR_OFFSET_OTEAL);
29848ac9 1107 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1108 TSI148_LCSR_OFFSET_OTOFU);
29848ac9 1109 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1110 TSI148_LCSR_OFFSET_OTOFL);
1111
d22b8ed9 1112 /* Write ctl reg without enable */
29848ac9 1113 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1114 TSI148_LCSR_OFFSET_OTAT);
1115
1116 if (enabled)
1117 temp_ctl |= TSI148_LCSR_OTAT_EN;
1118
29848ac9 1119 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1120 TSI148_LCSR_OFFSET_OTAT);
1121
1122 spin_unlock(&(image->lock));
1123 return 0;
1124
1125err_aspace:
1126err_dwidth:
1127err_gran:
1128 tsi148_free_resource(image);
1129err_res:
1130err_window:
1131 return retval;
1132
1133}
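/*
 * Usage sketch (illustrative only, all parameters are hypothetical): map a
 * 64KB window onto the VME bus at A32 address 0x80000000 for 32-bit
 * single-cycle data accesses; the PCI-side resource is allocated internally
 * by tsi148_alloc_resource():
 *
 *	tsi148_master_set(image, 1, 0x80000000, 0x10000,
 *		VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);
 */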
1134
1135/*
 1136 * Get the attributes of an outbound window.
1137 *
1138 * XXX Not parsing prefetch information.
1139 */
1140int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
1141 unsigned long long *vme_base, unsigned long long *size,
1142 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1143{
1144 unsigned int i, ctl;
1145 unsigned int pci_base_low, pci_base_high;
1146 unsigned int pci_bound_low, pci_bound_high;
1147 unsigned int vme_offset_low, vme_offset_high;
1148
1149 unsigned long long pci_base, pci_bound, vme_offset;
1150 struct tsi148_driver *bridge;
1151
1152 bridge = image->parent->driver_priv;
1153
1154 i = image->number;
1155
29848ac9 1156 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1157 TSI148_LCSR_OFFSET_OTAT);
1158
29848ac9 1159 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1160 TSI148_LCSR_OFFSET_OTSAU);
29848ac9 1161 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1162 TSI148_LCSR_OFFSET_OTSAL);
29848ac9 1163 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1164 TSI148_LCSR_OFFSET_OTEAU);
29848ac9 1165 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1166 TSI148_LCSR_OFFSET_OTEAL);
29848ac9 1167 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1168 TSI148_LCSR_OFFSET_OTOFU);
29848ac9 1169 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1170 TSI148_LCSR_OFFSET_OTOFL);
1171
 1172 /* Convert 2x 32-bit variables to a 64-bit variable */
1173 reg_join(pci_base_high, pci_base_low, &pci_base);
1174 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1175 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1176
1177 *vme_base = pci_base + vme_offset;
1178 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1179
1180 *enabled = 0;
1181 *aspace = 0;
1182 *cycle = 0;
1183 *dwidth = 0;
1184
1185 if (ctl & TSI148_LCSR_OTAT_EN)
1186 *enabled = 1;
1187
1188 /* Setup address space */
1189 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1190 *aspace |= VME_A16;
1191 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1192 *aspace |= VME_A24;
1193 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1194 *aspace |= VME_A32;
1195 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1196 *aspace |= VME_A64;
1197 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1198 *aspace |= VME_CRCSR;
1199 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1200 *aspace |= VME_USER1;
1201 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1202 *aspace |= VME_USER2;
1203 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1204 *aspace |= VME_USER3;
1205 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1206 *aspace |= VME_USER4;
1207
1208 /* Setup 2eSST speeds */
1209 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1210 *cycle |= VME_2eSST160;
1211 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1212 *cycle |= VME_2eSST267;
1213 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1214 *cycle |= VME_2eSST320;
1215
1216 /* Setup cycle types */
1217 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_SCT)
1218 *cycle |= VME_SCT;
1219 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_BLT)
1220 *cycle |= VME_BLT;
1221 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_MBLT)
1222 *cycle |= VME_MBLT;
1223 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eVME)
1224 *cycle |= VME_2eVME;
1225 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSST)
1226 *cycle |= VME_2eSST;
1227 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSSTB)
1228 *cycle |= VME_2eSSTB;
1229
1230 if (ctl & TSI148_LCSR_OTAT_SUP)
1231 *cycle |= VME_SUPER;
1232 else
1233 *cycle |= VME_USER;
1234
1235 if (ctl & TSI148_LCSR_OTAT_PGM)
1236 *cycle |= VME_PROG;
1237 else
1238 *cycle |= VME_DATA;
1239
1240 /* Setup data width */
1241 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1242 *dwidth = VME_D16;
1243 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1244 *dwidth = VME_D32;
1245
1246 return 0;
1247}
1248
1249
1250int tsi148_master_get( struct vme_master_resource *image, int *enabled,
1251 unsigned long long *vme_base, unsigned long long *size,
1252 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1253{
1254 int retval;
1255
1256 spin_lock(&(image->lock));
1257
1258 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1259 cycle, dwidth);
1260
1261 spin_unlock(&(image->lock));
1262
1263 return retval;
1264}
1265
1266ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1267 size_t count, loff_t offset)
1268{
1269 int retval, enabled;
1270 unsigned long long vme_base, size;
1271 vme_address_t aspace;
1272 vme_cycle_t cycle;
1273 vme_width_t dwidth;
1274 struct vme_bus_error *vme_err = NULL;
1275 struct vme_bridge *tsi148_bridge;
1276
1277 tsi148_bridge = image->parent;
1278
1279 spin_lock(&(image->lock));
1280
1281 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1282 retval = count;
1283
1284 if (!err_chk)
1285 goto skip_chk;
1286
1287 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1288 &dwidth);
1289
1290 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1291 count);
 1292 if (vme_err != NULL) {
 1293 dev_err(image->parent->parent, "First VME read error detected "
 1294 "at address 0x%llx\n", vme_err->address);
1295 retval = vme_err->address - (vme_base + offset);
 1296 /* Clear down saved errors in this address range */
1297 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1298 count);
1299 }
1300
1301skip_chk:
1302 spin_unlock(&(image->lock));
1303
1304 return retval;
1305}
1306
1307
1308ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1309 size_t count, loff_t offset)
1310{
1311 int retval = 0, enabled;
1312 unsigned long long vme_base, size;
1313 vme_address_t aspace;
1314 vme_cycle_t cycle;
1315 vme_width_t dwidth;
1316
1317 struct vme_bus_error *vme_err = NULL;
1318 struct vme_bridge *tsi148_bridge;
1319 struct tsi148_driver *bridge;
1320
1321 tsi148_bridge = image->parent;
1322
1323 bridge = tsi148_bridge->driver_priv;
1324
1325 spin_lock(&(image->lock));
1326
1327 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1328 retval = count;
1329
1330 /*
1331 * Writes are posted. We need to do a read on the VME bus to flush out
 1332 * all of the writes before we check for errors. We can't guarantee
 1333 * that reading the data we have just written is safe. It is believed
 1334 * that there isn't any read, write re-ordering, so we can read any
 1335 * location in VME space, so let's read the Device ID from the tsi148's
1336 * own registers as mapped into CR/CSR space.
1337 *
1338 * We check for saved errors in the written address range/space.
1339 */
1340
1341 if (!err_chk)
1342 goto skip_chk;
1343
1344 /*
1345 * Get window info first, to maximise the time that the buffers may
 1346 * flush on their own
1347 */
1348 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1349 &dwidth);
1350
29848ac9 1351 ioread16(bridge->flush_image->kern_base + 0x7F000);
d22b8ed9 1352
1353 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1354 count);
 1355 if (vme_err != NULL) {
 1356 printk(KERN_ERR "First VME write error detected at address 0x%llx\n",
1357 vme_err->address);
1358 retval = vme_err->address - (vme_base + offset);
 1359 /* Clear down saved errors in this address range */
1360 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1361 count);
1362 }
1363
1364skip_chk:
1365 spin_unlock(&(image->lock));
1366
1367 return retval;
1368}
1369
1370/*
1371 * Perform an RMW cycle on the VME bus.
1372 *
1373 * Requires a previously configured master window, returns final value.
1374 */
1375unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1376 unsigned int mask, unsigned int compare, unsigned int swap,
1377 loff_t offset)
1378{
1379 unsigned long long pci_addr;
1380 unsigned int pci_addr_high, pci_addr_low;
1381 u32 tmp, result;
1382 int i;
29848ac9 1383 struct tsi148_driver *bridge;
d22b8ed9 1384
29848ac9 1385 bridge = image->parent->driver_priv;
1386
1387 /* Find the PCI address that maps to the desired VME address */
1388 i = image->number;
1389
1390 /* Locking as we can only do one of these at a time */
29848ac9 1391 mutex_lock(&(bridge->vme_rmw));
1392
1393 /* Lock image */
1394 spin_lock(&(image->lock));
1395
29848ac9 1396 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9 1397 TSI148_LCSR_OFFSET_OTSAU);
29848ac9 1398 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1399 TSI148_LCSR_OFFSET_OTSAL);
1400
1401 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1402 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1403
1404 /* Configure registers */
1405 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1406 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1407 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1408 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1409 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1410
1411 /* Enable RMW */
29848ac9 1412 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
d22b8ed9 1413 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
29848ac9 1414 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1415
1416 /* Kick process off with a read to the required address. */
1417 result = ioread32be(image->kern_base + offset);
1418
1419 /* Disable RMW */
29848ac9 1420 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
d22b8ed9 1421 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
29848ac9 1422 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1423
1424 spin_unlock(&(image->lock));
1425
29848ac9 1426 mutex_unlock(&(bridge->vme_rmw));
1427
1428 return result;
1429}
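/*
 * Usage sketch (illustrative only; the compare-and-swap behaviour described
 * here is an interpretation of the TSI148 RMW machinery): try to claim a
 * lock bit in a word at 'offset' within a configured master window by
 * setting bit 0 when it is read back clear; the triggering read is expected
 * to return the original contents:
 *
 *	old = tsi148_master_rmw(image, 0x1, 0x0, 0x1, offset);
 *	if ((old & 0x1) == 0)
 *		the bit was seen clear, so the swap should have taken effect
 */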
1430
1431static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
1432 vme_cycle_t cycle, vme_width_t dwidth)
1433{
1434 /* Setup 2eSST speeds */
1435 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1436 case VME_2eSST160:
1437 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1438 break;
1439 case VME_2eSST267:
1440 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1441 break;
1442 case VME_2eSST320:
1443 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1444 break;
1445 }
1446
1447 /* Setup cycle types */
1448 if (cycle & VME_SCT) {
1449 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1450 }
1451 if (cycle & VME_BLT) {
1452 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1453 }
1454 if (cycle & VME_MBLT) {
1455 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1456 }
1457 if (cycle & VME_2eVME) {
1458 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1459 }
1460 if (cycle & VME_2eSST) {
1461 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1462 }
1463 if (cycle & VME_2eSSTB) {
 1464 printk(KERN_WARNING "Currently not setting Broadcast Select Registers\n");
1465 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1466 }
1467
1468 /* Setup data width */
1469 switch (dwidth) {
1470 case VME_D16:
1471 *attr |= TSI148_LCSR_DSAT_DBW_16;
1472 break;
1473 case VME_D32:
1474 *attr |= TSI148_LCSR_DSAT_DBW_32;
1475 break;
1476 default:
 1477 printk(KERN_ERR "Invalid data width\n");
1478 return -EINVAL;
1479 }
1480
1481 /* Setup address space */
1482 switch (aspace) {
1483 case VME_A16:
1484 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1485 break;
1486 case VME_A24:
1487 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1488 break;
1489 case VME_A32:
1490 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1491 break;
1492 case VME_A64:
1493 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1494 break;
1495 case VME_CRCSR:
1496 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1497 break;
1498 case VME_USER1:
1499 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1500 break;
1501 case VME_USER2:
1502 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1503 break;
1504 case VME_USER3:
1505 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1506 break;
1507 case VME_USER4:
1508 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1509 break;
1510 default:
 1511 printk(KERN_ERR "Invalid address space\n");
1512 return -EINVAL;
1513 break;
1514 }
1515
1516 if (cycle & VME_SUPER)
1517 *attr |= TSI148_LCSR_DSAT_SUP;
1518 if (cycle & VME_PROG)
1519 *attr |= TSI148_LCSR_DSAT_PGM;
1520
1521 return 0;
1522}
1523
1524static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
1525 vme_cycle_t cycle, vme_width_t dwidth)
1526{
1527 /* Setup 2eSST speeds */
1528 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1529 case VME_2eSST160:
1530 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1531 break;
1532 case VME_2eSST267:
1533 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1534 break;
1535 case VME_2eSST320:
1536 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1537 break;
1538 }
1539
1540 /* Setup cycle types */
1541 if (cycle & VME_SCT) {
1542 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1543 }
1544 if (cycle & VME_BLT) {
1545 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1546 }
1547 if (cycle & VME_MBLT) {
1548 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1549 }
1550 if (cycle & VME_2eVME) {
1551 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1552 }
1553 if (cycle & VME_2eSST) {
1554 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1555 }
1556 if (cycle & VME_2eSSTB) {
 1557 printk(KERN_WARNING "Currently not setting Broadcast Select Registers\n");
1558 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1559 }
1560
1561 /* Setup data width */
1562 switch (dwidth) {
1563 case VME_D16:
1564 *attr |= TSI148_LCSR_DDAT_DBW_16;
1565 break;
1566 case VME_D32:
1567 *attr |= TSI148_LCSR_DDAT_DBW_32;
1568 break;
1569 default:
 1570 printk(KERN_ERR "Invalid data width\n");
1571 return -EINVAL;
1572 }
1573
1574 /* Setup address space */
1575 switch (aspace) {
1576 case VME_A16:
1577 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1578 break;
1579 case VME_A24:
1580 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1581 break;
1582 case VME_A32:
1583 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1584 break;
1585 case VME_A64:
1586 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1587 break;
1588 case VME_CRCSR:
1589 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1590 break;
1591 case VME_USER1:
1592 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1593 break;
1594 case VME_USER2:
1595 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1596 break;
1597 case VME_USER3:
1598 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1599 break;
1600 case VME_USER4:
1601 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1602 break;
1603 default:
 1604 printk(KERN_ERR "Invalid address space\n");
1605 return -EINVAL;
1606 break;
1607 }
1608
1609 if (cycle & VME_SUPER)
1610 *attr |= TSI148_LCSR_DDAT_SUP;
1611 if (cycle & VME_PROG)
1612 *attr |= TSI148_LCSR_DDAT_PGM;
1613
1614 return 0;
1615}
1616
1617/*
1618 * Add a link list descriptor to the list
1619 */
1620int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
1621 struct vme_dma_attr *dest, size_t count)
1622{
1623 struct tsi148_dma_entry *entry, *prev;
1624 u32 address_high, address_low;
1625 struct vme_dma_pattern *pattern_attr;
1626 struct vme_dma_pci *pci_attr;
1627 struct vme_dma_vme *vme_attr;
1628 dma_addr_t desc_ptr;
1629 int retval = 0;
1630
bb9ea89e 1631 /* Descriptor must be aligned on 64-bit boundaries */
 1632 entry = kmalloc(sizeof(struct tsi148_dma_entry),
 1633 GFP_KERNEL);
 1634 if (entry == NULL) {
 1635 printk(KERN_ERR "Failed to allocate memory for dma resource "
 1636 "structure\n");
1637 retval = -ENOMEM;
1638 goto err_mem;
1639 }
1640
1641 /* Test descriptor alignment */
1642 if ((unsigned long)&(entry->descriptor) & 0x7) {
 1643 printk(KERN_ERR "Descriptor not aligned to 8 byte boundary as "
1644 "required: %p\n", &(entry->descriptor));
1645 retval = -EINVAL;
1646 goto err_align;
1647 }
1648
1649 /* Given we are going to fill out the structure, we probably don't
1650 * need to zero it, but better safe than sorry for now.
1651 */
1652 memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
1653
1654 /* Fill out source part */
1655 switch (src->type) {
1656 case VME_DMA_PATTERN:
1657 pattern_attr = (struct vme_dma_pattern *)src->private;
1658
1659 entry->descriptor.dsal = pattern_attr->pattern;
1660 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1661 /* Default behaviour is 32 bit pattern */
1662 if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
1663 entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1664 }
1665 /* It seems that the default behaviour is to increment */
1666 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
1667 entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
1668 }
1669 break;
1670 case VME_DMA_PCI:
1671 pci_attr = (struct vme_dma_pci *)src->private;
1672
1673 reg_split((unsigned long long)pci_attr->address, &address_high,
1674 &address_low);
1675 entry->descriptor.dsau = address_high;
1676 entry->descriptor.dsal = address_low;
1677 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
1678 break;
1679 case VME_DMA_VME:
1680 vme_attr = (struct vme_dma_vme *)src->private;
1681
1682 reg_split((unsigned long long)vme_attr->address, &address_high,
1683 &address_low);
1684 entry->descriptor.dsau = address_high;
1685 entry->descriptor.dsal = address_low;
1686 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1687
1688 retval = tsi148_dma_set_vme_src_attributes(
1689 &(entry->descriptor.dsat), vme_attr->aspace,
1690 vme_attr->cycle, vme_attr->dwidth);
 1691 if (retval < 0)
1692 goto err_source;
1693 break;
1694 default:
 1695 printk(KERN_ERR "Invalid source type\n");
1696 retval = -EINVAL;
1697 goto err_source;
1698 break;
1699 }
1700
1701 /* Assume last link - this will be over-written by adding another */
1702 entry->descriptor.dnlau = 0;
1703 entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1704
1705
1706 /* Fill out destination part */
1707 switch (dest->type) {
1708 case VME_DMA_PCI:
1709 pci_attr = (struct vme_dma_pci *)dest->private;
1710
1711 reg_split((unsigned long long)pci_attr->address, &address_high,
1712 &address_low);
1713 entry->descriptor.ddau = address_high;
1714 entry->descriptor.ddal = address_low;
1715 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1716 break;
1717 case VME_DMA_VME:
1718 vme_attr = (struct vme_dma_vme *)dest->private;
1719
1720 reg_split((unsigned long long)vme_attr->address, &address_high,
1721 &address_low);
1722 entry->descriptor.ddau = address_high;
1723 entry->descriptor.ddal = address_low;
1724 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1725
1726 retval = tsi148_dma_set_vme_dest_attributes(
1727 &(entry->descriptor.ddat), vme_attr->aspace,
1728 vme_attr->cycle, vme_attr->dwidth);
 1729 if (retval < 0)
1730 goto err_dest;
1731 break;
1732 default:
 1733 printk(KERN_ERR "Invalid destination type\n");
1734 retval = -EINVAL;
1735 goto err_dest;
1736 break;
1737 }
1738
1739 /* Fill out count */
1740 entry->descriptor.dcnt = (u32)count;
1741
1742 /* Add to list */
1743 list_add_tail(&(entry->list), &(list->entries));
1744
1745 /* Fill out previous descriptors "Next Address" */
 1746 if (entry->list.prev != &(list->entries)) {
1747 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1748 list);
1749 /* We need the bus address for the pointer */
1750 desc_ptr = virt_to_bus(&(entry->descriptor));
1751 reg_split(desc_ptr, &(prev->descriptor.dnlau),
1752 &(prev->descriptor.dnlal));
1753 }
1754
1755 return 0;
1756
1757err_dest:
1758err_source:
1759err_align:
1760 kfree(entry);
1761err_mem:
1762 return retval;
1763}
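/*
 * Illustrative note (no additional driver code): each tsi148_dma_entry
 * embeds a hardware descriptor, and descriptors are chained by bus address.
 * When entry N+1 is added, entry N's dnlau/dnlal pair is rewritten to point
 * at the bus address of entry N+1's descriptor, while the newest entry
 * keeps TSI148_LCSR_DNLAL_LLA set to mark the end of the chain:
 *
 *	[desc 0] -> [desc 1] -> [desc 2, LLA set]
 *
 * tsi148_dma_list_exec() then only needs the bus address of the first
 * descriptor to walk the whole list.
 */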
1764
1765/*
1766 * Check to see if the provided DMA channel is busy.
1767 */
29848ac9 1768static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1769{
1770 u32 tmp;
1771 struct tsi148_driver *bridge;
1772
1773 bridge = tsi148_bridge->driver_priv;
d22b8ed9 1774
29848ac9 1775 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1776 TSI148_LCSR_OFFSET_DSTA);
1777
1778 if (tmp & TSI148_LCSR_DSTA_BSY)
1779 return 0;
1780 else
1781 return 1;
1782
1783}
1784
1785/*
1786 * Execute a previously generated link list
1787 *
1788 * XXX Need to provide control register configuration.
1789 */
1790int tsi148_dma_list_exec(struct vme_dma_list *list)
1791{
1792 struct vme_dma_resource *ctrlr;
1793 int channel, retval = 0;
1794 struct tsi148_dma_entry *entry;
1795 dma_addr_t bus_addr;
1796 u32 bus_addr_high, bus_addr_low;
1797 u32 val, dctlreg = 0;
29848ac9 1798 struct tsi148_driver *bridge;
d22b8ed9
MW
1799
1800 ctrlr = list->parent;
1801
29848ac9
MW
1802 bridge = ctrlr->parent->driver_priv;
1803
400822fe 1804 mutex_lock(&(ctrlr->mtx));
d22b8ed9
MW
1805
1806 channel = ctrlr->number;
1807
 1808 if (!list_empty(&(ctrlr->running))) {
1809 /*
1810 * XXX We have an active DMA transfer and currently haven't
1811 * sorted out the mechanism for "pending" DMA transfers.
1812 * Return busy.
1813 */
1814 /* Need to add to pending here */
400822fe 1815 mutex_unlock(&(ctrlr->mtx));
d22b8ed9
MW
1816 return -EBUSY;
1817 } else {
1818 list_add(&(list->list), &(ctrlr->running));
1819 }
d22b8ed9
MW
1820
1821 /* Get first bus address and write into registers */
1822 entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
1823 list);
1824
1825 bus_addr = virt_to_bus(&(entry->descriptor));
1826
400822fe 1827 mutex_unlock(&(ctrlr->mtx));
d22b8ed9
MW
1828
1829 reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1830
29848ac9 1831 iowrite32be(bus_addr_high, bridge->base +
d22b8ed9 1832 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
29848ac9 1833 iowrite32be(bus_addr_low, bridge->base +
d22b8ed9
MW
1834 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1835
1836 /* Start the operation */
29848ac9 1837 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
d22b8ed9
MW
1838 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1839
29848ac9
MW
1840 wait_event_interruptible(bridge->dma_queue[channel],
1841 tsi148_dma_busy(ctrlr->parent, channel));
d22b8ed9
MW
1842 /*
 1843 * Read the status register; it remains valid until we kick off a
 1844 * new transfer.
1845 */
29848ac9 1846 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
d22b8ed9
MW
1847 TSI148_LCSR_OFFSET_DSTA);
1848
1849 if (val & TSI148_LCSR_DSTA_VBE) {
1850 printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
1851 retval = -EIO;
1852 }
1853
1854 /* Remove list from running list */
400822fe 1855 mutex_lock(&(ctrlr->mtx));
d22b8ed9 1856 list_del(&(list->list));
400822fe 1857 mutex_unlock(&(ctrlr->mtx));
d22b8ed9
MW
1858
1859 return retval;
1860}
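/*
 * Typical use of the three DMA list operations, as wired into the bridge
 * structure by tsi148_probe() (sketch only - attribute setup and error
 * handling omitted):
 *
 *	tsi148_dma_list_add(list, src_attr, dest_attr, count);
 *	tsi148_dma_list_add(list, src2_attr, dest2_attr, count2);
 *	tsi148_dma_list_exec(list);	- blocks until the transfer completes
 *	tsi148_dma_list_empty(list);	- frees the descriptor chain
 */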
1861
1862/*
 1863 * Clean up a previously generated linked list
 1864 *
 1865 * This is a separate function - do not assume that the chain cannot be reused.
1866 */
1867int tsi148_dma_list_empty(struct vme_dma_list *list)
1868{
1869 struct list_head *pos, *temp;
1870 struct tsi148_dma_entry *entry;
1871
1872 /* detach and free each entry */
1873 list_for_each_safe(pos, temp, &(list->entries)) {
1874 list_del(pos);
1875 entry = list_entry(pos, struct tsi148_dma_entry, list);
1876 kfree(entry);
1877 }
1878
 1879 return 0;
1880}
1881
1882/*
 1883 * All 4 location monitors reside at the same base address - this is
 1884 * therefore a system-wide configuration.
 1885 *
 1886 * This does not enable the location monitor - that should be done when the
 1887 * first callback is attached and disabled when the last callback is removed.
1888 */
42fb5031
MW
1889int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1890 vme_address_t aspace, vme_cycle_t cycle)
d22b8ed9
MW
1891{
1892 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1893 int i;
29848ac9
MW
1894 struct tsi148_driver *bridge;
1895
1896 bridge = lm->parent->driver_priv;
d22b8ed9 1897
42fb5031 1898 mutex_lock(&(lm->mtx));
d22b8ed9
MW
1899
1900 /* If we already have a callback attached, we can't move it! */
42fb5031 1901 for (i = 0; i < lm->monitors; i++) {
29848ac9 1902 if (bridge->lm_callback[i] != NULL) {
42fb5031 1903 mutex_unlock(&(lm->mtx));
d22b8ed9
MW
1904 printk("Location monitor callback attached, can't "
1905 "reset\n");
1906 return -EBUSY;
1907 }
1908 }
1909
1910 switch (aspace) {
1911 case VME_A16:
1912 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1913 break;
1914 case VME_A24:
1915 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1916 break;
1917 case VME_A32:
1918 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1919 break;
1920 case VME_A64:
1921 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1922 break;
1923 default:
42fb5031 1924 mutex_unlock(&(lm->mtx));
d22b8ed9
MW
 1925 printk(KERN_ERR "Invalid address space\n");
1926 return -EINVAL;
1927 break;
1928 }
1929
1930 if (cycle & VME_SUPER)
 1931 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1932 if (cycle & VME_USER)
1933 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1934 if (cycle & VME_PROG)
1935 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1936 if (cycle & VME_DATA)
1937 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1938
1939 reg_split(lm_base, &lm_base_high, &lm_base_low);
1940
29848ac9
MW
1941 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1942 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1943 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
d22b8ed9 1944
42fb5031 1945 mutex_unlock(&(lm->mtx));
d22b8ed9
MW
1946
1947 return 0;
1948}
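/*
 * Worked example of the register programming performed by tsi148_lm_set():
 * a location monitor placed at 0x12340000 in A32 space for non-privileged
 * program and data accesses (address purely illustrative) results in
 *
 *	LMBAU = 0x00000000
 *	LMBAL = 0x12340000
 *	LMAT  = TSI148_LCSR_LMAT_AS_A32 | TSI148_LCSR_LMAT_NPRIV |
 *		TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA
 *
 * The enable bit (TSI148_LCSR_LMAT_EN) is only set later, by
 * tsi148_lm_attach().
 */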
1949
1950/* Get the configuration of a location monitor and return whether it is
 1951 * enabled or disabled.
1952 */
42fb5031
MW
1953int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
1954 vme_address_t *aspace, vme_cycle_t *cycle)
d22b8ed9
MW
1955{
1956 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
29848ac9
MW
1957 struct tsi148_driver *bridge;
1958
1959 bridge = lm->parent->driver_priv;
d22b8ed9 1960
42fb5031 1961 mutex_lock(&(lm->mtx));
d22b8ed9 1962
29848ac9
MW
1963 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1964 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1965 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
d22b8ed9
MW
1966
1967 reg_join(lm_base_high, lm_base_low, lm_base);
1968
1969 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1970 enabled = 1;
1971
1972 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) {
1973 *aspace |= VME_A16;
1974 }
1975 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) {
1976 *aspace |= VME_A24;
1977 }
1978 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) {
1979 *aspace |= VME_A32;
1980 }
1981 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) {
1982 *aspace |= VME_A64;
1983 }
1984
1985 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
1986 *cycle |= VME_SUPER;
1987 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
1988 *cycle |= VME_USER;
1989 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
1990 *cycle |= VME_PROG;
1991 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
1992 *cycle |= VME_DATA;
1993
42fb5031 1994 mutex_unlock(&(lm->mtx));
d22b8ed9
MW
1995
1996 return enabled;
1997}
1998
1999/*
2000 * Attach a callback to a specific location monitor.
2001 *
 2002 * The callback will be passed the index of the monitor that triggered.
2003 */
42fb5031
MW
2004int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2005 void (*callback)(int))
d22b8ed9
MW
2006{
2007 u32 lm_ctl, tmp;
29848ac9
MW
2008 struct tsi148_driver *bridge;
2009
2010 bridge = lm->parent->driver_priv;
d22b8ed9 2011
42fb5031 2012 mutex_lock(&(lm->mtx));
d22b8ed9
MW
2013
2014 /* Ensure that the location monitor is configured - need PGM or DATA */
29848ac9 2015 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
d22b8ed9 2016 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
42fb5031 2017 mutex_unlock(&(lm->mtx));
d22b8ed9
MW
 2018 printk(KERN_ERR "Location monitor not properly configured\n");
2019 return -EINVAL;
2020 }
2021
2022 /* Check that a callback isn't already attached */
29848ac9 2023 if (bridge->lm_callback[monitor] != NULL) {
42fb5031 2024 mutex_unlock(&(lm->mtx));
d22b8ed9
MW
 2025 printk(KERN_ERR "Existing callback attached\n");
2026 return -EBUSY;
2027 }
2028
2029 /* Attach callback */
29848ac9 2030 bridge->lm_callback[monitor] = callback;
d22b8ed9
MW
2031
2032 /* Enable Location Monitor interrupt */
29848ac9 2033 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
d22b8ed9 2034 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
29848ac9 2035 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
d22b8ed9 2036
29848ac9 2037 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
d22b8ed9 2038 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
29848ac9 2039 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
d22b8ed9
MW
2040
 2041 /* Ensure that the global Location Monitor Enable bit is set */
2042 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2043 lm_ctl |= TSI148_LCSR_LMAT_EN;
29848ac9 2044 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
d22b8ed9
MW
2045 }
2046
42fb5031 2047 mutex_unlock(&(lm->mtx));
d22b8ed9
MW
2048
2049 return 0;
2050}
2051
2052/*
 2053 * Detach a callback function from a specific location monitor.
2054 */
42fb5031 2055int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
d22b8ed9
MW
2056{
2057 u32 lm_en, tmp;
29848ac9
MW
2058 struct tsi148_driver *bridge;
2059
2060 bridge = lm->parent->driver_priv;
d22b8ed9 2061
42fb5031 2062 mutex_lock(&(lm->mtx));
d22b8ed9
MW
2063
2064 /* Disable Location Monitor and ensure previous interrupts are clear */
29848ac9 2065 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
d22b8ed9 2066 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
29848ac9 2067 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
d22b8ed9 2068
29848ac9 2069 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
d22b8ed9 2070 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
29848ac9 2071 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
d22b8ed9
MW
2072
2073 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
29848ac9 2074 bridge->base + TSI148_LCSR_INTC);
d22b8ed9
MW
2075
2076 /* Detach callback */
29848ac9 2077 bridge->lm_callback[monitor] = NULL;
d22b8ed9
MW
2078
 2079 /* If all location monitors are disabled, disable the global Location Monitor */
2080 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2081 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
29848ac9 2082 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
d22b8ed9 2083 tmp &= ~TSI148_LCSR_LMAT_EN;
29848ac9 2084 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
d22b8ed9
MW
2085 }
2086
42fb5031 2087 mutex_unlock(&(lm->mtx));
d22b8ed9
MW
2088
2089 return 0;
2090}
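/*
 * Typical location monitor lifecycle using the operations above (sketch
 * only - error handling omitted):
 *
 *	tsi148_lm_set(lm, lm_base, VME_A32, VME_USER | VME_DATA);
 *	tsi148_lm_attach(lm, 0, my_callback);	- monitor 0 now armed
 *	...
 *	tsi148_lm_detach(lm, 0);
 *
 * The callback is invoked from the bridge's location monitor interrupt
 * handler and is passed the index of the monitor that triggered.
 */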
2091
2092/*
2093 * Determine Geographical Addressing
2094 */
29848ac9 2095int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
d22b8ed9
MW
2096{
2097 u32 slot = 0;
29848ac9
MW
2098 struct tsi148_driver *bridge;
2099
2100 bridge = tsi148_bridge->driver_priv;
d22b8ed9 2101
638f199d 2102 if (!geoid) {
29848ac9 2103 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
638f199d
MW
2104 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2105 } else
2106 slot = geoid;
2107
d22b8ed9
MW
2108 return (int)slot;
2109}
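/*
 * Example: with no "geoid" module parameter the slot number comes from the
 * geographical address field latched in VSTAT, so a board plugged into
 * slot 5 reports 5. Loading the module with geoid=5 forces the same result
 * on a backplane without geographical addressing.
 */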
2110
2111static int __init tsi148_init(void)
2112{
2113 return pci_register_driver(&tsi148_driver);
2114}
2115
2116/*
2117 * Configure CR/CSR space
2118 *
2119 * Access to the CR/CSR can be configured at power-up. The location of the
 2120 * CR/CSR registers in the CR/CSR address space is determined by the board's
 2121 * Auto-ID or Geographic address. This function ensures that the window is
 2122 * enabled at an offset consistent with the board's geographic address.
 2123 *
 2124 * Each board has a 512kB window, with the highest 4kB being used for the
 2125 * board's registers; this means there is a fixed-length 508kB window which
 2126 * must be mapped onto PCI memory.
2127 */
29848ac9
MW
2128static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2129 struct pci_dev *pdev)
d22b8ed9
MW
2130{
2131 u32 cbar, crat, vstat;
2132 u32 crcsr_bus_high, crcsr_bus_low;
2133 int retval;
29848ac9
MW
2134 struct tsi148_driver *bridge;
2135
2136 bridge = tsi148_bridge->driver_priv;
d22b8ed9
MW
2137
2138 /* Allocate mem for CR/CSR image */
29848ac9
MW
2139 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2140 &(bridge->crcsr_bus));
2141 if (bridge->crcsr_kernel == NULL) {
d22b8ed9
MW
2142 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2143 "image\n");
2144 return -ENOMEM;
2145 }
2146
29848ac9 2147 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
d22b8ed9 2148
29848ac9 2149 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
d22b8ed9 2150
29848ac9
MW
2151 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2152 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
d22b8ed9
MW
2153
2154 /* Ensure that the CR/CSR is configured at the correct offset */
29848ac9 2155 cbar = ioread32be(bridge->base + TSI148_CBAR);
d22b8ed9
MW
2156 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2157
29848ac9 2158 vstat = tsi148_slot_get(tsi148_bridge);
d22b8ed9
MW
2159
2160 if (cbar != vstat) {
638f199d 2161 cbar = vstat;
d22b8ed9 2162 dev_info(&pdev->dev, "Setting CR/CSR offset\n");
29848ac9 2163 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
d22b8ed9
MW
2164 }
2165 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
2166
29848ac9 2167 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
d22b8ed9
MW
 2168 if (crat & TSI148_LCSR_CRAT_EN) {
 2169 dev_info(&pdev->dev, "CR/CSR already enabled\n");
 2170 } else {
 2171 dev_info(&pdev->dev, "Enabling CR/CSR space\n");
 2172 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
 2173 bridge->base + TSI148_LCSR_CRAT);
 }
2174
2175 /* If we want flushed, error-checked writes, set up a window
2176 * over the CR/CSR registers. We read from here to safely flush
2177 * through VME writes.
2178 */
 2179 if (err_chk) {
29848ac9
MW
2180 retval = tsi148_master_set(bridge->flush_image, 1,
2181 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2182 VME_D16);
d22b8ed9
MW
2183 if (retval)
2184 dev_err(&pdev->dev, "Configuring flush image failed\n");
2185 }
2186
2187 return 0;
2188
2189}
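/*
 * Worked example of the CR/CSR layout handled above: each slot owns a
 * 512kB (0x80000 byte) block of CR/CSR space, so a board in slot 3 is
 * expected at CR/CSR address 3 * 0x80000 = 0x180000. The slot number is
 * held in CBAR shifted left by 3 bits, hence the >>3 and <<3 when checking
 * it against tsi148_slot_get(). With err_chk set, the flush window is
 * likewise pointed at the board's own 512kB block (vstat * 0x80000).
 */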
2190
29848ac9
MW
2191static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2192 struct pci_dev *pdev)
d22b8ed9
MW
2193{
2194 u32 crat;
29848ac9
MW
2195 struct tsi148_driver *bridge;
2196
2197 bridge = tsi148_bridge->driver_priv;
d22b8ed9
MW
2198
2199 /* Turn off CR/CSR space */
29848ac9 2200 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
d22b8ed9 2201 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
29848ac9 2202 bridge->base + TSI148_LCSR_CRAT);
d22b8ed9
MW
2203
2204 /* Free image */
29848ac9
MW
2205 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2206 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
d22b8ed9 2207
29848ac9
MW
2208 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2209 bridge->crcsr_bus);
d22b8ed9
MW
2210}
2211
2212static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2213{
2214 int retval, i, master_num;
2215 u32 data;
2216 struct list_head *pos = NULL;
29848ac9
MW
2217 struct vme_bridge *tsi148_bridge;
2218 struct tsi148_driver *tsi148_device;
d22b8ed9
MW
2219 struct vme_master_resource *master_image;
2220 struct vme_slave_resource *slave_image;
2221 struct vme_dma_resource *dma_ctrlr;
42fb5031 2222 struct vme_lm_resource *lm;
d22b8ed9
MW
2223
2224 /* If we want to support more than one of each bridge, we need to
2225 * dynamically generate this so we get one per device
2226 */
2227 tsi148_bridge = (struct vme_bridge *)kmalloc(sizeof(struct vme_bridge),
2228 GFP_KERNEL);
2229 if (tsi148_bridge == NULL) {
2230 dev_err(&pdev->dev, "Failed to allocate memory for device "
2231 "structure\n");
2232 retval = -ENOMEM;
2233 goto err_struct;
2234 }
2235
2236 memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
2237
29848ac9
MW
2238 tsi148_device = kmalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2239 if (tsi148_device == NULL) {
2240 dev_err(&pdev->dev, "Failed to allocate memory for device "
2241 "structure\n");
2242 retval = -ENOMEM;
2243 goto err_driver;
2244 }
2245
2246 memset(tsi148_device, 0, sizeof(struct tsi148_driver));
2247
2248 tsi148_bridge->driver_priv = tsi148_device;
2249
d22b8ed9
MW
2250 /* Enable the device */
2251 retval = pci_enable_device(pdev);
2252 if (retval) {
2253 dev_err(&pdev->dev, "Unable to enable device\n");
2254 goto err_enable;
2255 }
2256
2257 /* Map Registers */
2258 retval = pci_request_regions(pdev, driver_name);
2259 if (retval) {
2260 dev_err(&pdev->dev, "Unable to reserve resources\n");
2261 goto err_resource;
2262 }
2263
2264 /* map registers in BAR 0 */
29848ac9
MW
2265 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2266 4096);
2267 if (!tsi148_device->base) {
d22b8ed9
MW
2268 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2269 retval = -EIO;
2270 goto err_remap;
2271 }
2272
2273 /* Check to see if the mapping worked out */
29848ac9 2274 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
d22b8ed9
MW
2275 if (data != PCI_VENDOR_ID_TUNDRA) {
2276 dev_err(&pdev->dev, "CRG region check failed\n");
2277 retval = -EIO;
2278 goto err_test;
2279 }
2280
2281 /* Initialize wait queues & mutual exclusion flags */
29848ac9
MW
2282 init_waitqueue_head(&(tsi148_device->dma_queue[0]));
2283 init_waitqueue_head(&(tsi148_device->dma_queue[1]));
2284 init_waitqueue_head(&(tsi148_device->iack_queue));
2285 mutex_init(&(tsi148_device->vme_int));
2286 mutex_init(&(tsi148_device->vme_rmw));
d22b8ed9
MW
2287
2288 tsi148_bridge->parent = &(pdev->dev);
2289 strcpy(tsi148_bridge->name, driver_name);
2290
2291 /* Setup IRQ */
2292 retval = tsi148_irq_init(tsi148_bridge);
2293 if (retval != 0) {
2294 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2295 goto err_irq;
2296 }
2297
2298 /* If we are going to flush writes, we need to read from the VME bus.
 2299 * We need to do this safely, thus we read the device's own CR/CSR
2300 * register. To do this we must set up a window in CR/CSR space and
2301 * hence have one less master window resource available.
2302 */
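 /*
 * For example, assuming TSI148_MAX_MASTER is 8, master windows 0-6 remain
 * available to users while the highest-numbered window is claimed below
 * as the dedicated CR/CSR flush image.
 */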
2303 master_num = TSI148_MAX_MASTER;
 2304 if (err_chk) {
2305 master_num--;
29848ac9
MW
2306
2307 tsi148_device->flush_image = (struct vme_master_resource *)
2308 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2309 if (tsi148_device->flush_image == NULL) {
d22b8ed9
MW
2310 dev_err(&pdev->dev, "Failed to allocate memory for "
2311 "flush resource structure\n");
2312 retval = -ENOMEM;
2313 goto err_master;
2314 }
29848ac9
MW
2315 tsi148_device->flush_image->parent = tsi148_bridge;
2316 spin_lock_init(&(tsi148_device->flush_image->lock));
2317 tsi148_device->flush_image->locked = 1;
2318 tsi148_device->flush_image->number = master_num;
2319 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2320 VME_A32 | VME_A64;
2321 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2322 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2323 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2324 VME_USER | VME_PROG | VME_DATA;
2325 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
8fafb476 2326 memset(&(tsi148_device->flush_image->bus_resource), 0,
d22b8ed9 2327 sizeof(struct resource));
29848ac9 2328 tsi148_device->flush_image->kern_base = NULL;
d22b8ed9
MW
2329 }
2330
2331 /* Add master windows to list */
2332 INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
2333 for (i = 0; i < master_num; i++) {
2334 master_image = (struct vme_master_resource *)kmalloc(
2335 sizeof(struct vme_master_resource), GFP_KERNEL);
2336 if (master_image == NULL) {
2337 dev_err(&pdev->dev, "Failed to allocate memory for "
2338 "master resource structure\n");
2339 retval = -ENOMEM;
2340 goto err_master;
2341 }
2342 master_image->parent = tsi148_bridge;
2343 spin_lock_init(&(master_image->lock));
2344 master_image->locked = 0;
2345 master_image->number = i;
2346 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2347 VME_A64;
2348 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2349 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2350 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2351 VME_PROG | VME_DATA;
2352 master_image->width_attr = VME_D16 | VME_D32;
8fafb476 2353 memset(&(master_image->bus_resource), 0,
d22b8ed9
MW
2354 sizeof(struct resource));
2355 master_image->kern_base = NULL;
2356 list_add_tail(&(master_image->list),
2357 &(tsi148_bridge->master_resources));
2358 }
2359
2360 /* Add slave windows to list */
2361 INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
2362 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2363 slave_image = (struct vme_slave_resource *)kmalloc(
2364 sizeof(struct vme_slave_resource), GFP_KERNEL);
2365 if (slave_image == NULL) {
2366 dev_err(&pdev->dev, "Failed to allocate memory for "
2367 "slave resource structure\n");
2368 retval = -ENOMEM;
2369 goto err_slave;
2370 }
2371 slave_image->parent = tsi148_bridge;
400822fe 2372 mutex_init(&(slave_image->mtx));
d22b8ed9
MW
2373 slave_image->locked = 0;
2374 slave_image->number = i;
2375 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2376 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2377 VME_USER3 | VME_USER4;
2378 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2379 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2380 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2381 VME_PROG | VME_DATA;
2382 list_add_tail(&(slave_image->list),
2383 &(tsi148_bridge->slave_resources));
2384 }
2385
2386 /* Add dma engines to list */
2387 INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
2388 for (i = 0; i < TSI148_MAX_DMA; i++) {
2389 dma_ctrlr = (struct vme_dma_resource *)kmalloc(
2390 sizeof(struct vme_dma_resource), GFP_KERNEL);
2391 if (dma_ctrlr == NULL) {
2392 dev_err(&pdev->dev, "Failed to allocate memory for "
2393 "dma resource structure\n");
2394 retval = -ENOMEM;
2395 goto err_dma;
2396 }
2397 dma_ctrlr->parent = tsi148_bridge;
400822fe 2398 mutex_init(&(dma_ctrlr->mtx));
d22b8ed9
MW
2399 dma_ctrlr->locked = 0;
2400 dma_ctrlr->number = i;
4f723df4
MW
2401 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2402 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2403 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2404 VME_DMA_PATTERN_TO_MEM;
d22b8ed9
MW
2405 INIT_LIST_HEAD(&(dma_ctrlr->pending));
2406 INIT_LIST_HEAD(&(dma_ctrlr->running));
2407 list_add_tail(&(dma_ctrlr->list),
2408 &(tsi148_bridge->dma_resources));
2409 }
2410
42fb5031
MW
2411 /* Add location monitor to list */
2412 INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
2413 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2414 if (lm == NULL) {
2415 dev_err(&pdev->dev, "Failed to allocate memory for "
2416 "location monitor resource structure\n");
2417 retval = -ENOMEM;
2418 goto err_lm;
2419 }
2420 lm->parent = tsi148_bridge;
2421 mutex_init(&(lm->mtx));
2422 lm->locked = 0;
2423 lm->number = 1;
2424 lm->monitors = 4;
2425 list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
2426
d22b8ed9
MW
2427 tsi148_bridge->slave_get = tsi148_slave_get;
2428 tsi148_bridge->slave_set = tsi148_slave_set;
2429 tsi148_bridge->master_get = tsi148_master_get;
2430 tsi148_bridge->master_set = tsi148_master_set;
2431 tsi148_bridge->master_read = tsi148_master_read;
2432 tsi148_bridge->master_write = tsi148_master_write;
2433 tsi148_bridge->master_rmw = tsi148_master_rmw;
2434 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2435 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2436 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
c813f592
MW
2437 tsi148_bridge->irq_set = tsi148_irq_set;
2438 tsi148_bridge->irq_generate = tsi148_irq_generate;
d22b8ed9
MW
2439 tsi148_bridge->lm_set = tsi148_lm_set;
2440 tsi148_bridge->lm_get = tsi148_lm_get;
2441 tsi148_bridge->lm_attach = tsi148_lm_attach;
2442 tsi148_bridge->lm_detach = tsi148_lm_detach;
2443 tsi148_bridge->slot_get = tsi148_slot_get;
2444
29848ac9 2445 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
d22b8ed9
MW
2446 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2447 (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
29848ac9 2448 if (!geoid)
638f199d
MW
2449 dev_info(&pdev->dev, "VME geographical address is %d\n",
2450 data & TSI148_LCSR_VSTAT_GA_M);
29848ac9 2451 else
638f199d
MW
2452 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2453 geoid);
29848ac9 2454
d22b8ed9
MW
2455 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2456 err_chk ? "enabled" : "disabled");
2457
29848ac9 2458 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
 2459 if (retval) {
 2460 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
 2461 goto err_crcsr;
 }
2461
d22b8ed9
MW
2462 retval = vme_register_bridge(tsi148_bridge);
2463 if (retval != 0) {
2464 dev_err(&pdev->dev, "Chip Registration failed.\n");
2465 goto err_reg;
2466 }
2467
29848ac9
MW
2468 pci_set_drvdata(pdev, tsi148_bridge);
2469
d22b8ed9 2470 /* Clear VME bus "board fail", and "power-up reset" lines */
29848ac9 2471 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
d22b8ed9
MW
2472 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2473 data |= TSI148_LCSR_VSTAT_CPURST;
29848ac9 2474 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
d22b8ed9
MW
2475
2476 return 0;
2477
2478 vme_unregister_bridge(tsi148_bridge);
2479err_reg:
29848ac9 2480 tsi148_crcsr_exit(tsi148_bridge, pdev);
d22b8ed9 2481err_crcsr:
42fb5031
MW
2482err_lm:
2483 /* resources are stored in link list */
2484 list_for_each(pos, &(tsi148_bridge->lm_resources)) {
2485 lm = list_entry(pos, struct vme_lm_resource, list);
2486 list_del(pos);
2487 kfree(lm);
2488 }
d22b8ed9
MW
2489err_dma:
2490 /* resources are stored in link list */
2491 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2492 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2493 list_del(pos);
2494 kfree(dma_ctrlr);
2495 }
2496err_slave:
2497 /* resources are stored in link list */
2498 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2499 slave_image = list_entry(pos, struct vme_slave_resource, list);
2500 list_del(pos);
2501 kfree(slave_image);
2502 }
2503err_master:
2504 /* resources are stored in link list */
2505 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2506 master_image = list_entry(pos, struct vme_master_resource, list);
2507 list_del(pos);
2508 kfree(master_image);
2509 }
2510
29848ac9 2511 tsi148_irq_exit(tsi148_device, pdev);
d22b8ed9
MW
2512err_irq:
2513err_test:
29848ac9 2514 iounmap(tsi148_device->base);
d22b8ed9
MW
2515err_remap:
2516 pci_release_regions(pdev);
2517err_resource:
2518 pci_disable_device(pdev);
2519err_enable:
29848ac9
MW
2520 kfree(tsi148_device);
2521err_driver:
d22b8ed9
MW
2522 kfree(tsi148_bridge);
2523err_struct:
2524 return retval;
2525
2526}
2527
2528static void tsi148_remove(struct pci_dev *pdev)
2529{
2530 struct list_head *pos = NULL;
2531 struct vme_master_resource *master_image;
2532 struct vme_slave_resource *slave_image;
2533 struct vme_dma_resource *dma_ctrlr;
2534 int i;
29848ac9
MW
2535 struct tsi148_driver *bridge;
2536 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2537
2538 bridge = tsi148_bridge->driver_priv;
d22b8ed9 2539
d22b8ed9 2540
29848ac9 2541 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
d22b8ed9
MW
2542
2543 /*
2544 * Shutdown all inbound and outbound windows.
2545 */
2546 for (i = 0; i < 8; i++) {
29848ac9 2547 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
d22b8ed9 2548 TSI148_LCSR_OFFSET_ITAT);
29848ac9 2549 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
d22b8ed9
MW
2550 TSI148_LCSR_OFFSET_OTAT);
2551 }
2552
2553 /*
2554 * Shutdown Location monitor.
2555 */
29848ac9 2556 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
d22b8ed9
MW
2557
2558 /*
2559 * Shutdown CRG map.
2560 */
29848ac9 2561 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
d22b8ed9
MW
2562
2563 /*
2564 * Clear error status.
2565 */
29848ac9
MW
2566 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2567 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2568 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
d22b8ed9
MW
2569
2570 /*
2571 * Remove VIRQ interrupt (if any)
2572 */
29848ac9
MW
2573 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2574 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
d22b8ed9 2575
d22b8ed9
MW
2576 /*
2577 * Map all Interrupts to PCI INTA
2578 */
29848ac9
MW
2579 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2580 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
d22b8ed9 2581
29848ac9 2582 tsi148_irq_exit(bridge, pdev);
d22b8ed9
MW
2583
2584 vme_unregister_bridge(tsi148_bridge);
2585
29848ac9 2586 tsi148_crcsr_exit(tsi148_bridge, pdev);
d22b8ed9
MW
2587
2588 /* resources are stored in link list */
2589 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2590 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2591 list_del(pos);
2592 kfree(dma_ctrlr);
2593 }
2594
2595 /* resources are stored in link list */
2596 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2597 slave_image = list_entry(pos, struct vme_slave_resource, list);
2598 list_del(pos);
2599 kfree(slave_image);
2600 }
2601
2602 /* resources are stored in link list */
2603 list_for_each(pos, &(tsi148_bridge->master_resources)) {
638f199d
MW
2604 master_image = list_entry(pos, struct vme_master_resource,
2605 list);
d22b8ed9
MW
2606 list_del(pos);
2607 kfree(master_image);
2608 }
2609
d22b8ed9 2611
29848ac9 2612 iounmap(bridge->base);
d22b8ed9
MW
2613
2614 pci_release_regions(pdev);
2615
2616 pci_disable_device(pdev);
2617
29848ac9
MW
2618 kfree(tsi148_bridge->driver_priv);
2619
d22b8ed9
MW
2620 kfree(tsi148_bridge);
2621}
2622
2623static void __exit tsi148_exit(void)
2624{
2625 pci_unregister_driver(&tsi148_driver);
2626
2627 printk(KERN_DEBUG "Driver removed.\n");
2628}
2629
2630MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2631module_param(err_chk, bool, 0);
2632
638f199d
MW
2633MODULE_PARM_DESC(geoid, "Override geographical addressing");
2634module_param(geoid, int, 0);
2635
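/*
 * Both parameters are given at module load time, e.g. (assuming the module
 * is built as vme_tsi148.ko):
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=5
 */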
d22b8ed9
MW
2636MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2637MODULE_LICENSE("GPL");
2638
2639module_init(tsi148_init);
2640module_exit(tsi148_exit);