drivers/rapidio/rio.c
1 /*
2 * RapidIO interconnect services
3 * (RapidIO Interconnect Specification, http://www.rapidio.org)
4 *
5 * Copyright 2005 MontaVista Software, Inc.
6 * Matt Porter <mporter@kernel.crashing.org>
7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write/Error Management initialization and handling
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/rio.h>
24 #include <linux/rio_drv.h>
25 #include <linux/rio_ids.h>
26 #include <linux/rio_regs.h>
27 #include <linux/module.h>
28 #include <linux/spinlock.h>
29 #include <linux/slab.h>
30 #include <linux/interrupt.h>
31
32 #include "rio.h"
33
34 static LIST_HEAD(rio_devices);
35 static DEFINE_SPINLOCK(rio_global_list_lock);
36
37 static LIST_HEAD(rio_mports);
38 static DEFINE_MUTEX(rio_mport_list_lock);
39 static unsigned char next_portid;
40 static DEFINE_SPINLOCK(rio_mmap_lock);
41
42 /**
43 * rio_local_get_device_id - Get the base/extended device id for a port
44 * @port: RIO master port from which to get the deviceid
45 *
46 * Reads the base/extended device id from the local device
47 * implementing the master port. Returns the 8/16-bit device
48 * id.
49 */
50 u16 rio_local_get_device_id(struct rio_mport *port)
51 {
52 u32 result;
53
54 rio_local_read_config_32(port, RIO_DID_CSR, &result);
55
56 return (RIO_GET_DID(port->sys_size, result));
57 }
58
59 /**
60  * rio_add_device - Adds a RIO device to the device model
61  * @rdev: RIO device
62  *
63  * Registers the RIO device with the device model, adds it to the
64  * global RIO device list, and creates the generic sysfs nodes for
65  * the device.
66 */
67 int rio_add_device(struct rio_dev *rdev)
68 {
69 int err;
70
71 err = device_add(&rdev->dev);
72 if (err)
73 return err;
74
75 spin_lock(&rio_global_list_lock);
76 list_add_tail(&rdev->global_list, &rio_devices);
77 spin_unlock(&rio_global_list_lock);
78
79 rio_create_sysfs_dev_files(rdev);
80
81 return 0;
82 }
83 EXPORT_SYMBOL_GPL(rio_add_device);
84
85 /**
86 * rio_request_inb_mbox - request inbound mailbox service
87 * @mport: RIO master port from which to allocate the mailbox resource
88 * @dev_id: Device specific pointer to pass on event
89 * @mbox: Mailbox number to claim
90 * @entries: Number of entries in inbound mailbox queue
91 * @minb: Callback to execute when inbound message is received
92 *
93 * Requests ownership of an inbound mailbox resource and binds
94 * a callback function to the resource. Returns %0 on success.
95 */
96 int rio_request_inb_mbox(struct rio_mport *mport,
97 void *dev_id,
98 int mbox,
99 int entries,
100 void (*minb) (struct rio_mport * mport, void *dev_id, int mbox,
101 int slot))
102 {
103 int rc = -ENOSYS;
104 struct resource *res;
105
106 if (mport->ops->open_inb_mbox == NULL)
107 goto out;
108
109 res = kmalloc(sizeof(struct resource), GFP_KERNEL);
110
111 if (res) {
112 rio_init_mbox_res(res, mbox, mbox);
113
114 /* Make sure this mailbox isn't in use */
115 if ((rc =
116 request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE],
117 res)) < 0) {
118 kfree(res);
119 goto out;
120 }
121
122 mport->inb_msg[mbox].res = res;
123
124 /* Hook the inbound message callback */
125 mport->inb_msg[mbox].mcback = minb;
126
127 rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries);
128 } else
129 rc = -ENOMEM;
130
131 out:
132 return rc;
133 }
134
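/*
 * Editorial usage sketch (not part of the original file): a driver that
 * consumes inbound mailbox 0 could claim it roughly as shown below.  The
 * names foo_inb_cb(), foo_process_msg(), foo_priv and RX_RING_SIZE are
 * hypothetical; the receive path drains messages with rio_get_inb_message()
 * from rio_drv.h.
 *
 *	static void foo_inb_cb(struct rio_mport *mport, void *dev_id,
 *			       int mbox, int slot)
 *	{
 *		void *msg;
 *
 *		while ((msg = rio_get_inb_message(mport, mbox)) != NULL)
 *			foo_process_msg(dev_id, msg);
 *	}
 *
 *	ret = rio_request_inb_mbox(mport, foo_priv, 0, RX_RING_SIZE,
 *				   foo_inb_cb);
 *	if (ret)
 *		goto err_mbox;
 */
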
135 /**
136 * rio_release_inb_mbox - release inbound mailbox message service
137 * @mport: RIO master port from which to release the mailbox resource
138 * @mbox: Mailbox number to release
139 *
140 * Releases ownership of an inbound mailbox resource. Returns 0
141 * if the request has been satisfied.
142 */
143 int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
144 {
145 if (mport->ops->close_inb_mbox) {
146 mport->ops->close_inb_mbox(mport, mbox);
147
148 /* Release the mailbox resource */
149 return release_resource(mport->inb_msg[mbox].res);
150 } else
151 return -ENOSYS;
152 }
153
154 /**
155 * rio_request_outb_mbox - request outbound mailbox service
156 * @mport: RIO master port from which to allocate the mailbox resource
157 * @dev_id: Device specific pointer to pass on event
158 * @mbox: Mailbox number to claim
159 * @entries: Number of entries in outbound mailbox queue
160 * @moutb: Callback to execute when outbound message is sent
161 *
162 * Requests ownership of an outbound mailbox resource and binds
163 * a callback function to the resource. Returns 0 on success.
164 */
165 int rio_request_outb_mbox(struct rio_mport *mport,
166 void *dev_id,
167 int mbox,
168 int entries,
169 void (*moutb) (struct rio_mport * mport, void *dev_id, int mbox, int slot))
170 {
171 int rc = -ENOSYS;
172 struct resource *res;
173
174 if (mport->ops->open_outb_mbox == NULL)
175 goto out;
176
177 res = kmalloc(sizeof(struct resource), GFP_KERNEL);
178
179 if (res) {
180 rio_init_mbox_res(res, mbox, mbox);
181
182 /* Make sure this outbound mailbox isn't in use */
183 if ((rc =
184 request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE],
185 res)) < 0) {
186 kfree(res);
187 goto out;
188 }
189
190 mport->outb_msg[mbox].res = res;
191
192 /* Hook the outbound message callback */
193 mport->outb_msg[mbox].mcback = moutb;
194
195 rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);
196 } else
197 rc = -ENOMEM;
198
199 out:
200 return rc;
201 }
202
203 /**
204 * rio_release_outb_mbox - release outbound mailbox message service
205 * @mport: RIO master port from which to release the mailbox resource
206 * @mbox: Mailbox number to release
207 *
208 * Releases ownership of an outbound mailbox resource. Returns 0
209 * if the request has been satisfied.
210 */
211 int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
212 {
213 if (mport->ops->close_outb_mbox) {
214 mport->ops->close_outb_mbox(mport, mbox);
215
216 /* Release the mailbox resource */
217 return release_resource(mport->outb_msg[mbox].res);
218 } else
219 return -ENOSYS;
220 }
221
222 /**
223 * rio_setup_inb_dbell - bind inbound doorbell callback
224 * @mport: RIO master port to bind the doorbell callback
225 * @dev_id: Device specific pointer to pass on event
226 * @res: Doorbell message resource
227 * @dinb: Callback to execute when doorbell is received
228 *
229 * Adds a doorbell resource/callback pair into a port's
230 * doorbell event list. Returns 0 if the request has been
231 * satisfied.
232 */
233 static int
234 rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
235 void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst,
236 u16 info))
237 {
238 int rc = 0;
239 struct rio_dbell *dbell;
240
241 if (!(dbell = kmalloc(sizeof(struct rio_dbell), GFP_KERNEL))) {
242 rc = -ENOMEM;
243 goto out;
244 }
245
246 dbell->res = res;
247 dbell->dinb = dinb;
248 dbell->dev_id = dev_id;
249
250 list_add_tail(&dbell->node, &mport->dbells);
251
252 out:
253 return rc;
254 }
255
256 /**
257 * rio_request_inb_dbell - request inbound doorbell message service
258 * @mport: RIO master port from which to allocate the doorbell resource
259 * @dev_id: Device specific pointer to pass on event
260 * @start: Doorbell info range start
261 * @end: Doorbell info range end
262 * @dinb: Callback to execute when doorbell is received
263 *
264 * Requests ownership of an inbound doorbell resource and binds
265 * a callback function to the resource. Returns 0 if the request
266 * has been satisfied.
267 */
268 int rio_request_inb_dbell(struct rio_mport *mport,
269 void *dev_id,
270 u16 start,
271 u16 end,
272 void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src,
273 u16 dst, u16 info))
274 {
275 int rc = 0;
276
277 struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
278
279 if (res) {
280 rio_init_dbell_res(res, start, end);
281
282 /* Make sure these doorbells aren't in use */
283 if ((rc =
284 request_resource(&mport->riores[RIO_DOORBELL_RESOURCE],
285 res)) < 0) {
286 kfree(res);
287 goto out;
288 }
289
290 /* Hook the doorbell callback */
291 rc = rio_setup_inb_dbell(mport, dev_id, res, dinb);
292 } else
293 rc = -ENOMEM;
294
295 out:
296 return rc;
297 }
298
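/*
 * Editorial usage sketch: a driver watching for inbound doorbells in the
 * info range 0x0000-0x000f could register a handler like this.  The names
 * foo_dbell_cb() and foo_priv are assumptions for illustration only.
 *
 *	static void foo_dbell_cb(struct rio_mport *mport, void *dev_id,
 *				 u16 src, u16 dst, u16 info)
 *	{
 *		pr_debug("doorbell 0x%04x from destid 0x%04x\n", info, src);
 *	}
 *
 *	ret = rio_request_inb_dbell(mport, foo_priv, 0x0000, 0x000f,
 *				    foo_dbell_cb);
 */
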
299 /**
300 * rio_release_inb_dbell - release inbound doorbell message service
301 * @mport: RIO master port from which to release the doorbell resource
302 * @start: Doorbell info range start
303 * @end: Doorbell info range end
304 *
305 * Releases ownership of an inbound doorbell resource and removes
306 * callback from the doorbell event list. Returns 0 if the request
307 * has been satisfied.
308 */
309 int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
310 {
311 int rc = 0, found = 0;
312 struct rio_dbell *dbell;
313
314 list_for_each_entry(dbell, &mport->dbells, node) {
315 if ((dbell->res->start == start) && (dbell->res->end == end)) {
316 found = 1;
317 break;
318 }
319 }
320
321 /* If we can't find an exact match, fail */
322 if (!found) {
323 rc = -EINVAL;
324 goto out;
325 }
326
327 /* Delete from list */
328 list_del(&dbell->node);
329
330 /* Release the doorbell resource */
331 rc = release_resource(dbell->res);
332
333 /* Free the doorbell event */
334 kfree(dbell);
335
336 out:
337 return rc;
338 }
339
340 /**
341 * rio_request_outb_dbell - request outbound doorbell message range
342 * @rdev: RIO device from which to allocate the doorbell resource
343 * @start: Doorbell message range start
344 * @end: Doorbell message range end
345 *
346 * Requests ownership of a doorbell message range. Returns a resource
347 * if the request has been satisfied or %NULL on failure.
348 */
349 struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
350 u16 end)
351 {
352 struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
353
354 if (res) {
355 rio_init_dbell_res(res, start, end);
356
357 /* Make sure these doorbells aren't in use */
358 if (request_resource(&rdev->riores[RIO_DOORBELL_RESOURCE], res)
359 < 0) {
360 kfree(res);
361 res = NULL;
362 }
363 }
364
365 return res;
366 }
367
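/*
 * Editorial usage sketch: pairing rio_request_outb_dbell() with the
 * rio_send_doorbell() helper from rio_drv.h.  The doorbell range and the
 * error label are illustrative assumptions.
 *
 *	struct resource *dres;
 *
 *	dres = rio_request_outb_dbell(rdev, 0x0010, 0x001f);
 *	if (!dres)
 *		goto err_dbell;
 *	rio_send_doorbell(rdev, 0x0010);
 *	...
 *	rio_release_outb_dbell(rdev, dres);
 */
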
368 /**
369 * rio_release_outb_dbell - release outbound doorbell message range
370 * @rdev: RIO device from which to release the doorbell resource
371 * @res: Doorbell resource to be freed
372 *
373 * Releases ownership of a doorbell message range. Returns 0 if the
374 * request has been satisfied.
375 */
376 int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
377 {
378 int rc = release_resource(res);
379
380 kfree(res);
381
382 return rc;
383 }
384
385 /**
386 * rio_request_inb_pwrite - request inbound port-write message service
387 * @rdev: RIO device to register the inbound port-write callback with
388 * @pwcback: Callback routine to execute when port-write is received
389 *
390 * Binds a port-write callback function to the RapidIO device.
391 * Returns 0 if the request has been satisfied.
392 */
393 int rio_request_inb_pwrite(struct rio_dev *rdev,
394 int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
395 {
396 int rc = 0;
397
398 spin_lock(&rio_global_list_lock);
399 if (rdev->pwcback != NULL)
400 rc = -ENOMEM;
401 else
402 rdev->pwcback = pwcback;
403
404 spin_unlock(&rio_global_list_lock);
405 return rc;
406 }
407 EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
408
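/*
 * Editorial usage sketch: an endpoint driver that wants to see its own
 * port-write messages before the generic handler runs could register a
 * callback such as the hypothetical foo_pw_handler() below.  Returning 0
 * makes rio_inb_pwrite_handler() skip its standard processing.
 *
 *	static int foo_pw_handler(struct rio_dev *rdev, union rio_pw_msg *msg,
 *				  int step)
 *	{
 *		pr_debug("PW from %s, step %d\n", rio_name(rdev), step);
 *		return 0;
 *	}
 *
 *	if (rio_request_inb_pwrite(rdev, foo_pw_handler))
 *		dev_warn(&rdev->dev, "port-write callback already set\n");
 */
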
409 /**
410 * rio_release_inb_pwrite - release inbound port-write message service
411 * @rdev: RIO device which registered for inbound port-write callback
412 *
413 * Removes callback from the rio_dev structure. Returns 0 if the request
414 * has been satisfied.
415 */
416 int rio_release_inb_pwrite(struct rio_dev *rdev)
417 {
418 int rc = -ENOMEM;
419
420 spin_lock(&rio_global_list_lock);
421 if (rdev->pwcback) {
422 rdev->pwcback = NULL;
423 rc = 0;
424 }
425
426 spin_unlock(&rio_global_list_lock);
427 return rc;
428 }
429 EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
430
431 /**
432 * rio_map_inb_region -- Map inbound memory region.
433 * @mport: Master port.
434 * @local: physical address of memory region to be mapped
435 * @rbase: RIO base address assigned to this window
436 * @size: Size of the memory region
437 * @rflags: Flags for mapping.
438 *
439 * Return: 0 -- Success.
440 *
441 * This function will create the mapping from RIO space to local memory.
442 */
443 int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
444 u64 rbase, u32 size, u32 rflags)
445 {
446 int rc = 0;
447 unsigned long flags;
448
449 if (!mport->ops->map_inb)
450 return -1;
451 spin_lock_irqsave(&rio_mmap_lock, flags);
452 rc = mport->ops->map_inb(mport, local, rbase, size, rflags);
453 spin_unlock_irqrestore(&rio_mmap_lock, flags);
454 return rc;
455 }
456 EXPORT_SYMBOL_GPL(rio_map_inb_region);
457
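/*
 * Editorial usage sketch: exposing a local DMA buffer to the RapidIO fabric
 * with rio_map_inb_region().  The buffer size, the RIO base address and the
 * "dev" pointer (the device that owns the master port) are assumptions for
 * illustration.
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	if (rio_map_inb_region(mport, dma, 0x10000000, SZ_64K, 0))
 *		goto err_map;
 *	...
 *	rio_unmap_inb_region(mport, dma);
 */
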
458 /**
459 * rio_unmap_inb_region -- Unmap the inbound memory region
460 * @mport: Master port
461 * @lstart: physical address of memory region to be unmapped
462 */
463 void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart)
464 {
465 unsigned long flags;
466 if (!mport->ops->unmap_inb)
467 return;
468 spin_lock_irqsave(&rio_mmap_lock, flags);
469 mport->ops->unmap_inb(mport, lstart);
470 spin_unlock_irqrestore(&rio_mmap_lock, flags);
471 }
472 EXPORT_SYMBOL_GPL(rio_unmap_inb_region);
473
474 /**
475 * rio_mport_get_physefb - Helper function that returns register offset
476 * for Physical Layer Extended Features Block.
477 * @port: Master port to issue transaction
478 * @local: Indicate a local master port or remote device access
479 * @destid: Destination ID of the device
480 * @hopcount: Number of switch hops to the device
481 */
482 u32
483 rio_mport_get_physefb(struct rio_mport *port, int local,
484 u16 destid, u8 hopcount)
485 {
486 u32 ext_ftr_ptr;
487 u32 ftr_header;
488
489 ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);
490
491 while (ext_ftr_ptr) {
492 if (local)
493 rio_local_read_config_32(port, ext_ftr_ptr,
494 &ftr_header);
495 else
496 rio_mport_read_config_32(port, destid, hopcount,
497 ext_ftr_ptr, &ftr_header);
498
499 ftr_header = RIO_GET_BLOCK_ID(ftr_header);
500 switch (ftr_header) {
501
502 case RIO_EFB_SER_EP_ID_V13P:
503 case RIO_EFB_SER_EP_REC_ID_V13P:
504 case RIO_EFB_SER_EP_FREE_ID_V13P:
505 case RIO_EFB_SER_EP_ID:
506 case RIO_EFB_SER_EP_REC_ID:
507 case RIO_EFB_SER_EP_FREE_ID:
508 case RIO_EFB_SER_EP_FREC_ID:
509
510 return ext_ftr_ptr;
511
512 default:
513 break;
514 }
515
516 ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
517 hopcount, ext_ftr_ptr);
518 }
519
520 return ext_ftr_ptr;
521 }
522 EXPORT_SYMBOL_GPL(rio_mport_get_physefb);
523
524 /**
525 * rio_get_comptag - Begin or continue searching for a RIO device by component tag
526 * @comp_tag: RIO component tag to match
527 * @from: Previous RIO device found in search, or %NULL for new search
528 *
529 * Iterates through the list of known RIO devices. If a RIO device is
530 * found with a matching @comp_tag, a pointer to its device
531 * structure is returned. Otherwise, %NULL is returned. A new search
532 * is initiated by passing %NULL to the @from argument. Otherwise, if
533 * @from is not %NULL, searches continue from the next device on the global
534 * list.
535 */
536 struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
537 {
538 struct list_head *n;
539 struct rio_dev *rdev;
540
541 spin_lock(&rio_global_list_lock);
542 n = from ? from->global_list.next : rio_devices.next;
543
544 while (n && (n != &rio_devices)) {
545 rdev = rio_dev_g(n);
546 if (rdev->comp_tag == comp_tag)
547 goto exit;
548 n = n->next;
549 }
550 rdev = NULL;
551 exit:
552 spin_unlock(&rio_global_list_lock);
553 return rdev;
554 }
555 EXPORT_SYMBOL_GPL(rio_get_comptag);
556
557 /**
558 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
559 * @rdev: Pointer to RIO device control structure
560 * @pnum: Switch port number to set LOCKOUT bit
561 * @lock: Operation: set (=1) or clear (=0)
562 */
563 int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
564 {
565 u32 regval;
566
567 rio_read_config_32(rdev,
568 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
569 &regval);
570 if (lock)
571 regval |= RIO_PORT_N_CTL_LOCKOUT;
572 else
573 regval &= ~RIO_PORT_N_CTL_LOCKOUT;
574
575 rio_write_config_32(rdev,
576 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
577 regval);
578 return 0;
579 }
580 EXPORT_SYMBOL_GPL(rio_set_port_lockout);
581
582 /**
583 * rio_switch_init - Sets switch operations for a particular vendor switch
584 * @rdev: RIO device
585 * @do_enum: Enumeration/Discovery mode flag
586 *
587 * Searches the RIO switch ops table for known switch types. If the vid
588 * and did match a switch table entry, the switch initialization routine
589 * is called to set up switch-specific operations.
590 */
591 void rio_switch_init(struct rio_dev *rdev, int do_enum)
592 {
593 struct rio_switch_ops *cur = __start_rio_switch_ops;
594 struct rio_switch_ops *end = __end_rio_switch_ops;
595
596 while (cur < end) {
597 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
598 pr_debug("RIO: calling init routine for %s\n",
599 rio_name(rdev));
600 cur->init_hook(rdev, do_enum);
601 break;
602 }
603 cur++;
604 }
605
606 if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
607 pr_debug("RIO: adding STD routing ops for %s\n",
608 rio_name(rdev));
609 rdev->rswitch->add_entry = rio_std_route_add_entry;
610 rdev->rswitch->get_entry = rio_std_route_get_entry;
611 rdev->rswitch->clr_table = rio_std_route_clr_table;
612 }
613
614 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
615 printk(KERN_ERR "RIO: missing routing ops for %s\n",
616 rio_name(rdev));
617 }
618 EXPORT_SYMBOL_GPL(rio_switch_init);
619
620 /**
621 * rio_enable_rx_tx_port - enable input receiver and output transmitter of
622 * given port
623 * @port: Master port associated with the RIO network
624 * @local: local=1 selects the local port, otherwise a far-end device is accessed
625 * @destid: Destination ID of the device to check host bit
626 * @hopcount: Number of hops to reach the target
627 * @port_num: Port number (on a switch) to enable on a far-end device
628 *
629 * Returns 0 on success, or -EIO if the Port n Control Command and Status
630 * Register (EXT_PTR+0x3C) cannot be read or written.
631 */
632 int rio_enable_rx_tx_port(struct rio_mport *port,
633 int local, u16 destid,
634 u8 hopcount, u8 port_num)
635 {
636 #ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
637 u32 regval;
638 u32 ext_ftr_ptr;
639
640 /*
641 * enable rx input tx output port
642 */
643 pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
644 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
645
646 ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
647
648 if (local) {
649 rio_local_read_config_32(port, ext_ftr_ptr +
650 RIO_PORT_N_CTL_CSR(0),
651 &regval);
652 } else {
653 if (rio_mport_read_config_32(port, destid, hopcount,
654 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
655 return -EIO;
656 }
657
658 if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
659 /* serial */
660 regval = regval | RIO_PORT_N_CTL_EN_RX_SER
661 | RIO_PORT_N_CTL_EN_TX_SER;
662 } else {
663 /* parallel */
664 regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
665 | RIO_PORT_N_CTL_EN_TX_PAR;
666 }
667
668 if (local) {
669 rio_local_write_config_32(port, ext_ftr_ptr +
670 RIO_PORT_N_CTL_CSR(0), regval);
671 } else {
672 if (rio_mport_write_config_32(port, destid, hopcount,
673 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
674 return -EIO;
675 }
676 #endif
677 return 0;
678 }
679 EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port);
680
681
682 /**
683 * rio_chk_dev_route - Validate route to the specified device.
684 * @rdev: RIO device that failed to respond
685 * @nrdev: Last active device on the route to rdev
686 * @npnum: nrdev's port number on the route to rdev
687 *
688 * Follows a route to the specified RIO device to determine the last available
689 * device (and corresponding RIO port) on the route.
690 */
691 static int
692 rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
693 {
694 u32 result;
695 int p_port, rc = -EIO;
696 struct rio_dev *prev = NULL;
697
698 /* Find switch with failed RIO link */
699 while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) {
700 if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) {
701 prev = rdev->prev;
702 break;
703 }
704 rdev = rdev->prev;
705 }
706
707 if (prev == NULL)
708 goto err_out;
709
710 p_port = prev->rswitch->route_table[rdev->destid];
711
712 if (p_port != RIO_INVALID_ROUTE) {
713 pr_debug("RIO: link failed on [%s]-P%d\n",
714 rio_name(prev), p_port);
715 *nrdev = prev;
716 *npnum = p_port;
717 rc = 0;
718 } else
719 pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev));
720 err_out:
721 return rc;
722 }
723
724 /**
725 * rio_mport_chk_dev_access - Validate access to the specified device.
726 * @mport: Master port to send transactions
727 * @destid: Device destination ID in network
728 * @hopcount: Number of hops into the network
729 */
730 int
731 rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
732 {
733 int i = 0;
734 u32 tmp;
735
736 while (rio_mport_read_config_32(mport, destid, hopcount,
737 RIO_DEV_ID_CAR, &tmp)) {
738 i++;
739 if (i == RIO_MAX_CHK_RETRY)
740 return -EIO;
741 mdelay(1);
742 }
743
744 return 0;
745 }
746 EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access);
747
748 /**
749 * rio_chk_dev_access - Validate access to the specified device.
750 * @rdev: Pointer to RIO device control structure
751 */
752 static int rio_chk_dev_access(struct rio_dev *rdev)
753 {
754 return rio_mport_chk_dev_access(rdev->net->hport,
755 rdev->destid, rdev->hopcount);
756 }
757
758 /**
759 * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
760 * returns link-response (if requested).
761 * @rdev: RIO device to issue Input-status command
762 * @pnum: Device port number to issue the command
763 * @lnkresp: Response from a link partner
764 */
765 static int
766 rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
767 {
768 u32 regval;
769 int checkcount;
770
771 if (lnkresp) {
772 /* Read from link maintenance response register
773 * to clear valid bit */
774 rio_read_config_32(rdev,
775 rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
776 &regval);
777 udelay(50);
778 }
779
780 /* Issue Input-status command */
781 rio_write_config_32(rdev,
782 rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
783 RIO_MNT_REQ_CMD_IS);
784
785 /* Exit if the response is not expected */
786 if (lnkresp == NULL)
787 return 0;
788
789 checkcount = 3;
790 while (checkcount--) {
791 udelay(50);
792 rio_read_config_32(rdev,
793 rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
794 &regval);
795 if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
796 *lnkresp = regval;
797 return 0;
798 }
799 }
800
801 return -EIO;
802 }
803
804 /**
805 * rio_clr_err_stopped - Clears port Error-stopped states.
806 * @rdev: Pointer to RIO device control structure
807 * @pnum: Switch port number to clear errors
808 * @err_status: port error status (if 0 reads register from device)
809 */
810 static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
811 {
812 struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
813 u32 regval;
814 u32 far_ackid, far_linkstat, near_ackid;
815
816 if (err_status == 0)
817 rio_read_config_32(rdev,
818 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
819 &err_status);
820
821 if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
822 pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
823 /*
824 * Send a Link-Request/Input-Status control symbol
825 */
826 if (rio_get_input_status(rdev, pnum, &regval)) {
827 pr_debug("RIO_EM: Input-status response timeout\n");
828 goto rd_err;
829 }
830
831 pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
832 pnum, regval);
833 far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
834 far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
835 rio_read_config_32(rdev,
836 rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
837 &regval);
838 pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
839 near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
840 pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
841 " near_ackID=0x%02x\n",
842 pnum, far_ackid, far_linkstat, near_ackid);
843
844 /*
845 * If required, synchronize ackIDs of near and
846 * far sides.
847 */
848 if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
849 (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
850 /* Align near outstanding/outbound ackIDs with
851 * far inbound.
852 */
853 rio_write_config_32(rdev,
854 rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
855 (near_ackid << 24) |
856 (far_ackid << 8) | far_ackid);
857 /* Align far outstanding/outbound ackIDs with
858 * near inbound.
859 */
860 far_ackid++;
861 if (nextdev)
862 rio_write_config_32(nextdev,
863 nextdev->phys_efptr +
864 RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
865 (far_ackid << 24) |
866 (near_ackid << 8) | near_ackid);
867 else
868 pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
869 }
870 rd_err:
871 rio_read_config_32(rdev,
872 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
873 &err_status);
874 pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
875 }
876
877 if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
878 pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
879 rio_get_input_status(nextdev,
880 RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
881 udelay(50);
882
883 rio_read_config_32(rdev,
884 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
885 &err_status);
886 pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
887 }
888
889 return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
890 RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
891 }
892
893 /**
894 * rio_inb_pwrite_handler - process inbound port-write message
895 * @pw_msg: pointer to inbound port-write message
896 *
897 * Processes an inbound port-write message. Returns 0 if the request
898 * has been satisfied.
899 */
900 int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
901 {
902 struct rio_dev *rdev;
903 u32 err_status, em_perrdet, em_ltlerrdet;
904 int rc, portnum;
905
906 rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
907 if (rdev == NULL) {
908 /* Device removed or enumeration error */
909 pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
910 __func__, pw_msg->em.comptag);
911 return -EIO;
912 }
913
914 pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
915
916 #ifdef DEBUG_PW
917 {
918 u32 i;
919 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
920 pr_debug("0x%02x: %08x %08x %08x %08x\n",
921 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
922 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
923 i += 4;
924 }
925 }
926 #endif
927
928 /* Call an external service function (if one is registered
929 * for this device). This may be the service for endpoints that send
930 * device-specific port-write messages. End-point messages are expected
931 * to be handled completely by the EP-specific device driver.
932 * For switches, rc == 0 signals that no standard processing is required.
933 */
934 if (rdev->pwcback != NULL) {
935 rc = rdev->pwcback(rdev, pw_msg, 0);
936 if (rc == 0)
937 return 0;
938 }
939
940 portnum = pw_msg->em.is_port & 0xFF;
941
942 /* Check if device and route to it are functional:
943 * Sometimes devices may send PW message(s) just before being
944 * powered down (or link being lost).
945 */
946 if (rio_chk_dev_access(rdev)) {
947 pr_debug("RIO: device access failed - get link partner\n");
948 /* Scan route to the device and identify failed link.
949 * This will replace device and port reported in PW message.
950 * PW message should not be used after this point.
951 */
952 if (rio_chk_dev_route(rdev, &rdev, &portnum)) {
953 pr_err("RIO: Route trace for %s failed\n",
954 rio_name(rdev));
955 return -EIO;
956 }
957 pw_msg = NULL;
958 }
959
960 /* For End-point devices processing stops here */
961 if (!(rdev->pef & RIO_PEF_SWITCH))
962 return 0;
963
964 if (rdev->phys_efptr == 0) {
965 pr_err("RIO_PW: Bad switch initialization for %s\n",
966 rio_name(rdev));
967 return 0;
968 }
969
970 /*
971 * Process the port-write notification from switch
972 */
973 if (rdev->rswitch->em_handle)
974 rdev->rswitch->em_handle(rdev, portnum);
975
976 rio_read_config_32(rdev,
977 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
978 &err_status);
979 pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
980
981 if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
982
983 if (!(rdev->rswitch->port_ok & (1 << portnum))) {
984 rdev->rswitch->port_ok |= (1 << portnum);
985 rio_set_port_lockout(rdev, portnum, 0);
986 /* Schedule Insertion Service */
987 pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
988 rio_name(rdev), portnum);
989 }
990
991 /* Clear error-stopped states (if reported).
992 * Depending on the link partner state, two attempts
993 * may be needed for successful recovery.
994 */
995 if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
996 RIO_PORT_N_ERR_STS_PW_INP_ES)) {
997 if (rio_clr_err_stopped(rdev, portnum, err_status))
998 rio_clr_err_stopped(rdev, portnum, 0);
999 }
1000 } else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */
1001
1002 if (rdev->rswitch->port_ok & (1 << portnum)) {
1003 rdev->rswitch->port_ok &= ~(1 << portnum);
1004 rio_set_port_lockout(rdev, portnum, 1);
1005
1006 rio_write_config_32(rdev,
1007 rdev->phys_efptr +
1008 RIO_PORT_N_ACK_STS_CSR(portnum),
1009 RIO_PORT_N_ACK_CLEAR);
1010
1011 /* Schedule Extraction Service */
1012 pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
1013 rio_name(rdev), portnum);
1014 }
1015 }
1016
1017 rio_read_config_32(rdev,
1018 rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
1019 if (em_perrdet) {
1020 pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
1021 portnum, em_perrdet);
1022 /* Clear EM Port N Error Detect CSR */
1023 rio_write_config_32(rdev,
1024 rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
1025 }
1026
1027 rio_read_config_32(rdev,
1028 rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
1029 if (em_ltlerrdet) {
1030 pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
1031 em_ltlerrdet);
1032 /* Clear EM L/T Layer Error Detect CSR */
1033 rio_write_config_32(rdev,
1034 rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
1035 }
1036
1037 /* Clear remaining error bits and Port-Write Pending bit */
1038 rio_write_config_32(rdev,
1039 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
1040 err_status);
1041
1042 return 0;
1043 }
1044 EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
1045
1046 /**
1047 * rio_mport_get_efb - get pointer to next extended features block
1048 * @port: Master port to issue transaction
1049 * @local: Indicate a local master port or remote device access
1050 * @destid: Destination ID of the device
1051 * @hopcount: Number of switch hops to the device
1052 * @from: Offset of current Extended Feature block header (if 0 starts
1053 * from ExtFeaturePtr)
1054 */
1055 u32
1056 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
1057 u8 hopcount, u32 from)
1058 {
1059 u32 reg_val;
1060
1061 if (from == 0) {
1062 if (local)
1063 rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
1064 &reg_val);
1065 else
1066 rio_mport_read_config_32(port, destid, hopcount,
1067 RIO_ASM_INFO_CAR, &reg_val);
1068 return reg_val & RIO_EXT_FTR_PTR_MASK;
1069 } else {
1070 if (local)
1071 rio_local_read_config_32(port, from, &reg_val);
1072 else
1073 rio_mport_read_config_32(port, destid, hopcount,
1074 from, &reg_val);
1075 return RIO_GET_BLOCK_ID(reg_val);
1076 }
1077 }
1078 EXPORT_SYMBOL_GPL(rio_mport_get_efb);
1079
1080 /**
1081 * rio_mport_get_feature - query for devices' extended features
1082 * @port: Master port to issue transaction
1083 * @local: Indicate a local master port or remote device access
1084 * @destid: Destination ID of the device
1085 * @hopcount: Number of switch hops to the device
1086 * @ftr: Extended feature code
1087 *
1088 * Tell if a device supports a given RapidIO capability.
1089 * Returns the offset of the requested extended feature
1090 * block within the device's RIO configuration space or
1091 * 0 in case the device does not support it. Possible
1092 * values for @ftr:
1093 *
1094 * %RIO_EFB_PAR_EP_ID LP/LVDS EP Devices
1095 *
1096 * %RIO_EFB_PAR_EP_REC_ID LP/LVDS EP Recovery Devices
1097 *
1098 * %RIO_EFB_PAR_EP_FREE_ID LP/LVDS EP Free Devices
1099 *
1100 * %RIO_EFB_SER_EP_ID LP/Serial EP Devices
1101 *
1102 * %RIO_EFB_SER_EP_REC_ID LP/Serial EP Recovery Devices
1103 *
1104 * %RIO_EFB_SER_EP_FREE_ID LP/Serial EP Free Devices
1105 */
1106 u32
1107 rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
1108 u8 hopcount, int ftr)
1109 {
1110 u32 asm_info, ext_ftr_ptr, ftr_header;
1111
1112 if (local)
1113 rio_local_read_config_32(port, RIO_ASM_INFO_CAR, &asm_info);
1114 else
1115 rio_mport_read_config_32(port, destid, hopcount,
1116 RIO_ASM_INFO_CAR, &asm_info);
1117
1118 ext_ftr_ptr = asm_info & RIO_EXT_FTR_PTR_MASK;
1119
1120 while (ext_ftr_ptr) {
1121 if (local)
1122 rio_local_read_config_32(port, ext_ftr_ptr,
1123 &ftr_header);
1124 else
1125 rio_mport_read_config_32(port, destid, hopcount,
1126 ext_ftr_ptr, &ftr_header);
1127 if (RIO_GET_BLOCK_ID(ftr_header) == ftr)
1128 return ext_ftr_ptr;
1129 if (!(ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header)))
1130 break;
1131 }
1132
1133 return 0;
1134 }
1135 EXPORT_SYMBOL_GPL(rio_mport_get_feature);
1136
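/*
 * Editorial usage sketch: locating the Error Management extended features
 * block of an already-enumerated device.  rdev is assumed to be a valid
 * struct rio_dev obtained elsewhere (e.g. via rio_get_device()).
 *
 *	u32 em_ptr;
 *
 *	em_ptr = rio_mport_get_feature(rdev->net->hport, 0, rdev->destid,
 *				       rdev->hopcount, RIO_EFB_ERR_MGMNT);
 *	if (!em_ptr)
 *		pr_debug("RIO: %s has no EM block\n", rio_name(rdev));
 */
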
1137 /**
1138 * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
1139 * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
1140 * @did: RIO did to match or %RIO_ANY_ID to match all dids
1141 * @asm_vid: RIO asm_vid to match or %RIO_ANY_ID to match all asm_vids
1142 * @asm_did: RIO asm_did to match or %RIO_ANY_ID to match all asm_dids
1143 * @from: Previous RIO device found in search, or %NULL for new search
1144 *
1145 * Iterates through the list of known RIO devices. If a RIO device is
1146 * found with a matching @vid, @did, @asm_vid, @asm_did, the reference
1147 * count to the device is incremented and a pointer to its device
1148 * structure is returned. Otherwise, %NULL is returned. A new search
1149 * is initiated by passing %NULL to the @from argument. Otherwise, if
1150 * @from is not %NULL, searches continue from the next device on the global
1151 * list. The reference count for @from is always decremented if it is
1152 * not %NULL.
1153 */
1154 struct rio_dev *rio_get_asm(u16 vid, u16 did,
1155 u16 asm_vid, u16 asm_did, struct rio_dev *from)
1156 {
1157 struct list_head *n;
1158 struct rio_dev *rdev;
1159
1160 WARN_ON(in_interrupt());
1161 spin_lock(&rio_global_list_lock);
1162 n = from ? from->global_list.next : rio_devices.next;
1163
1164 while (n && (n != &rio_devices)) {
1165 rdev = rio_dev_g(n);
1166 if ((vid == RIO_ANY_ID || rdev->vid == vid) &&
1167 (did == RIO_ANY_ID || rdev->did == did) &&
1168 (asm_vid == RIO_ANY_ID || rdev->asm_vid == asm_vid) &&
1169 (asm_did == RIO_ANY_ID || rdev->asm_did == asm_did))
1170 goto exit;
1171 n = n->next;
1172 }
1173 rdev = NULL;
1174 exit:
1175 rio_dev_put(from);
1176 rdev = rio_dev_get(rdev);
1177 spin_unlock(&rio_global_list_lock);
1178 return rdev;
1179 }
1180
1181 /**
1182 * rio_get_device - Begin or continue searching for a RIO device by vid/did
1183 * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
1184 * @did: RIO did to match or %RIO_ANY_ID to match all dids
1185 * @from: Previous RIO device found in search, or %NULL for new search
1186 *
1187 * Iterates through the list of known RIO devices. If a RIO device is
1188 * found with a matching @vid and @did, the reference count to the
1189 * device is incremented and a pointer to its device structure is returned.
1190 * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
1191 * to the @from argument. Otherwise, if @from is not %NULL, searches
1192 * continue from the next device on the global list. The reference count for
1193 * @from is always decremented if it is not %NULL.
1194 */
1195 struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
1196 {
1197 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
1198 }
1199
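/*
 * Editorial usage sketch: walking every enumerated device with
 * rio_get_device().  Passing the previous device back in drops its
 * reference, so an explicit rio_dev_put() is only needed when breaking out
 * of the loop early; the vendor ID value is an illustrative assumption.
 *
 *	struct rio_dev *rdev = NULL;
 *
 *	while ((rdev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, rdev))) {
 *		if (rdev->vid == 0x0038)
 *			break;
 *	}
 *	if (rdev)
 *		rio_dev_put(rdev);
 */
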
1200 /**
1201 * rio_std_route_add_entry - Add switch route table entry using standard
1202 * registers defined in RIO specification rev.1.3
1203 * @mport: Master port to issue transaction
1204 * @destid: Destination ID of the device
1205 * @hopcount: Number of switch hops to the device
1206 * @table: routing table ID (global or port-specific)
1207 * @route_destid: destID entry in the RT
1208 * @route_port: destination port for specified destID
1209 */
1210 int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
1211 u16 table, u16 route_destid, u8 route_port)
1212 {
1213 if (table == RIO_GLOBAL_TABLE) {
1214 rio_mport_write_config_32(mport, destid, hopcount,
1215 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
1216 (u32)route_destid);
1217 rio_mport_write_config_32(mport, destid, hopcount,
1218 RIO_STD_RTE_CONF_PORT_SEL_CSR,
1219 (u32)route_port);
1220 }
1221
1222 udelay(10);
1223 return 0;
1224 }
1225
1226 /**
1227 * rio_std_route_get_entry - Read switch route table entry (port number)
1228 * associated with specified destID using standard registers defined in RIO
1229 * specification rev.1.3
1230 * @mport: Master port to issue transaction
1231 * @destid: Destination ID of the device
1232 * @hopcount: Number of switch hops to the device
1233 * @table: routing table ID (global or port-specific)
1234 * @route_destid: destID entry in the RT
1235 * @route_port: returned destination port for specified destID
1236 */
1237 int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
1238 u16 table, u16 route_destid, u8 *route_port)
1239 {
1240 u32 result;
1241
1242 if (table == RIO_GLOBAL_TABLE) {
1243 rio_mport_write_config_32(mport, destid, hopcount,
1244 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
1245 rio_mport_read_config_32(mport, destid, hopcount,
1246 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
1247
1248 *route_port = (u8)result;
1249 }
1250
1251 return 0;
1252 }
1253
1254 /**
1255 * rio_std_route_clr_table - Clear switch route table using standard registers
1256 * defined in RIO specification rev.1.3.
1257 * @mport: Master port to issue transaction
1258 * @destid: Destination ID of the device
1259 * @hopcount: Number of switch hops to the device
1260 * @table: routing table ID (global or port-specific)
1261 */
1262 int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
1263 u16 table)
1264 {
1265 u32 max_destid = 0xff;
1266 u32 i, pef, id_inc = 1, ext_cfg = 0;
1267 u32 port_sel = RIO_INVALID_ROUTE;
1268
1269 if (table == RIO_GLOBAL_TABLE) {
1270 rio_mport_read_config_32(mport, destid, hopcount,
1271 RIO_PEF_CAR, &pef);
1272
1273 if (mport->sys_size) {
1274 rio_mport_read_config_32(mport, destid, hopcount,
1275 RIO_SWITCH_RT_LIMIT,
1276 &max_destid);
1277 max_destid &= RIO_RT_MAX_DESTID;
1278 }
1279
1280 if (pef & RIO_PEF_EXT_RT) {
1281 ext_cfg = 0x80000000;
1282 id_inc = 4;
1283 port_sel = (RIO_INVALID_ROUTE << 24) |
1284 (RIO_INVALID_ROUTE << 16) |
1285 (RIO_INVALID_ROUTE << 8) |
1286 RIO_INVALID_ROUTE;
1287 }
1288
1289 for (i = 0; i <= max_destid;) {
1290 rio_mport_write_config_32(mport, destid, hopcount,
1291 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
1292 ext_cfg | i);
1293 rio_mport_write_config_32(mport, destid, hopcount,
1294 RIO_STD_RTE_CONF_PORT_SEL_CSR,
1295 port_sel);
1296 i += id_inc;
1297 }
1298 }
1299
1300 udelay(10);
1301 return 0;
1302 }
1303
1304 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1305
1306 static bool rio_chan_filter(struct dma_chan *chan, void *arg)
1307 {
1308 struct rio_dev *rdev = arg;
1309
1310 /* Check that DMA device belongs to the right MPORT */
1311 return (rdev->net->hport ==
1312 container_of(chan->device, struct rio_mport, dma));
1313 }
1314
1315 /**
1316 * rio_request_dma - request RapidIO capable DMA channel that supports
1317 * specified target RapidIO device.
1318 * @rdev: RIO device control structure
1319 *
1320 * Returns pointer to allocated DMA channel or NULL if failed.
1321 */
1322 struct dma_chan *rio_request_dma(struct rio_dev *rdev)
1323 {
1324 dma_cap_mask_t mask;
1325 struct dma_chan *dchan;
1326
1327 dma_cap_zero(mask);
1328 dma_cap_set(DMA_SLAVE, mask);
1329 dchan = dma_request_channel(mask, rio_chan_filter, rdev);
1330
1331 return dchan;
1332 }
1333 EXPORT_SYMBOL_GPL(rio_request_dma);
1334
1335 /**
1336 * rio_release_dma - release specified DMA channel
1337 * @dchan: DMA channel to release
1338 */
1339 void rio_release_dma(struct dma_chan *dchan)
1340 {
1341 dma_release_channel(dchan);
1342 }
1343 EXPORT_SYMBOL_GPL(rio_release_dma);
1344
1345 /**
1346 * rio_dma_prep_slave_sg - RapidIO specific wrapper
1347 * for device_prep_slave_sg callback defined by DMAENGINE.
1348 * @rdev: RIO device control structure
1349 * @dchan: DMA channel to configure
1350 * @data: RIO specific data descriptor
1351 * @direction: DMA data transfer direction (TO or FROM the device)
1352 * @flags: dmaengine defined flags
1353 *
1354 * Initializes RapidIO capable DMA channel for the specified data transfer.
1355 * Uses DMA channel private extension to pass information related to remote
1356 * target RIO device.
1357 * Returns pointer to DMA transaction descriptor or NULL if failed.
1358 */
1359 struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
1360 struct dma_chan *dchan, struct rio_dma_data *data,
1361 enum dma_transfer_direction direction, unsigned long flags)
1362 {
1363 struct dma_async_tx_descriptor *txd = NULL;
1364 struct rio_dma_ext rio_ext;
1365
1366 if (dchan->device->device_prep_slave_sg == NULL) {
1367 pr_err("%s: prep_rio_sg == NULL\n", __func__);
1368 return NULL;
1369 }
1370
1371 rio_ext.destid = rdev->destid;
1372 rio_ext.rio_addr_u = data->rio_addr_u;
1373 rio_ext.rio_addr = data->rio_addr;
1374 rio_ext.wr_type = data->wr_type;
1375
1376 txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
1377 direction, flags, &rio_ext);
1378
1379 return txd;
1380 }
1381 EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
1382
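/*
 * Editorial usage sketch: requesting a RapidIO-capable DMA channel and
 * preparing a slave-SG write to a remote target.  The scatterlist (sgl,
 * sg_count) and the RIO address are assumptions; the rio_dma_data fields
 * mirror the structure declared in rio_drv.h.
 *
 *	struct dma_chan *dchan;
 *	struct dma_async_tx_descriptor *txd;
 *	struct rio_dma_data data = {
 *		.sg		= sgl,
 *		.sg_len		= sg_count,
 *		.rio_addr	= 0x00100000,
 *		.rio_addr_u	= 0,
 *		.wr_type	= RDW_ALL_NWRITE,
 *	};
 *
 *	dchan = rio_request_dma(rdev);
 *	if (!dchan)
 *		return -ENODEV;
 *	txd = rio_dma_prep_slave_sg(rdev, dchan, &data, DMA_MEM_TO_DEV, 0);
 *	...
 *	rio_release_dma(dchan);
 */
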
1383 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
1384
1385 /**
1386 * rio_find_mport - find RIO mport by its ID
1387 * @mport_id: number (ID) of mport device
1388 *
1389 * Given a RIO mport number, the desired mport is located
1390 * in the global list of mports. If the mport is found, a pointer to its
1391 * data structure is returned. If no mport is found, %NULL is returned.
1392 */
1393 struct rio_mport *rio_find_mport(int mport_id)
1394 {
1395 struct rio_mport *port;
1396
1397 mutex_lock(&rio_mport_list_lock);
1398 list_for_each_entry(port, &rio_mports, node) {
1399 if (port->id == mport_id)
1400 goto found;
1401 }
1402 port = NULL;
1403 found:
1404 mutex_unlock(&rio_mport_list_lock);
1405
1406 return port;
1407 }
1408
1409 /**
1410 * rio_register_scan - enumeration/discovery method registration interface
1411 * @mport_id: mport device ID for which fabric scan routine has to be set
1412 * (RIO_MPORT_ANY = set for all available mports)
1413 * @scan_ops: enumeration/discovery control structure
1414 *
1415 * Assigns enumeration or discovery method to the specified mport device (or all
1416 * available mports if RIO_MPORT_ANY is specified).
1417 * Returns an error if the mport already has an enumerator attached to it.
1418 * In case of RIO_MPORT_ANY, it ignores ports with valid scan routines and returns
1419 * an error if it was unable to find at least one available mport.
1420 */
1421 int rio_register_scan(int mport_id, struct rio_scan *scan_ops)
1422 {
1423 struct rio_mport *port;
1424 int rc = -EBUSY;
1425
1426 mutex_lock(&rio_mport_list_lock);
1427 list_for_each_entry(port, &rio_mports, node) {
1428 if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
1429 if (port->nscan && mport_id == RIO_MPORT_ANY)
1430 continue;
1431 else if (port->nscan)
1432 break;
1433
1434 port->nscan = scan_ops;
1435 rc = 0;
1436
1437 if (mport_id != RIO_MPORT_ANY)
1438 break;
1439 }
1440 }
1441 mutex_unlock(&rio_mport_list_lock);
1442
1443 return rc;
1444 }
1445 EXPORT_SYMBOL_GPL(rio_register_scan);
1446
1447 /**
1448 * rio_unregister_scan - removes enumeration/discovery method from mport
1449 * @mport_id: mport device ID for which fabric scan routine has to be
1450 * unregistered (RIO_MPORT_ANY = set for all available mports)
1451 *
1452 * Removes enumeration or discovery method assigned to the specified mport
1453 * device (or all available mports if RIO_MPORT_ANY is specified).
1454 */
1455 int rio_unregister_scan(int mport_id)
1456 {
1457 struct rio_mport *port;
1458
1459 mutex_lock(&rio_mport_list_lock);
1460 list_for_each_entry(port, &rio_mports, node) {
1461 if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
1462 if (port->nscan)
1463 port->nscan = NULL;
1464 if (mport_id != RIO_MPORT_ANY)
1465 break;
1466 }
1467 }
1468 mutex_unlock(&rio_mport_list_lock);
1469
1470 return 0;
1471 }
1472 EXPORT_SYMBOL_GPL(rio_unregister_scan);
1473
1474 static void rio_fixup_device(struct rio_dev *dev)
1475 {
1476 }
1477
1478 static int rio_init(void)
1479 {
1480 struct rio_dev *dev = NULL;
1481
1482 while ((dev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, dev)) != NULL) {
1483 rio_fixup_device(dev);
1484 }
1485 return 0;
1486 }
1487
1488 static struct workqueue_struct *rio_wq;
1489
1490 struct rio_disc_work {
1491 struct work_struct work;
1492 struct rio_mport *mport;
1493 };
1494
1495 static void disc_work_handler(struct work_struct *_work)
1496 {
1497 struct rio_disc_work *work;
1498
1499 work = container_of(_work, struct rio_disc_work, work);
1500 pr_debug("RIO: discovery work for mport %d %s\n",
1501 work->mport->id, work->mport->name);
1502 work->mport->nscan->discover(work->mport, 0);
1503 }
1504
1505 int rio_init_mports(void)
1506 {
1507 struct rio_mport *port;
1508 struct rio_disc_work *work;
1509 int n = 0;
1510
1511 if (!next_portid)
1512 return -ENODEV;
1513
1514 /*
1515 * First, run enumerations and check if we need to perform discovery
1516 * on any of the registered mports.
1517 */
1518 mutex_lock(&rio_mport_list_lock);
1519 list_for_each_entry(port, &rio_mports, node) {
1520 if (port->host_deviceid >= 0) {
1521 if (port->nscan)
1522 port->nscan->enumerate(port, 0);
1523 } else
1524 n++;
1525 }
1526 mutex_unlock(&rio_mport_list_lock);
1527
1528 if (!n)
1529 goto no_disc;
1530
1531 /*
1532 * If we have mports that require discovery schedule a discovery work
1533 * for each of them. If the code below fails to allocate needed
1534 * resources, exit without error to keep results of enumeration
1535 * process (if any).
1536 * TODO: Implement restart of discovery process for all or
1537 * individual discovering mports.
1538 */
1539 rio_wq = alloc_workqueue("riodisc", 0, 0);
1540 if (!rio_wq) {
1541 pr_err("RIO: unable to allocate rio_wq\n");
1542 goto no_disc;
1543 }
1544
1545 work = kcalloc(n, sizeof *work, GFP_KERNEL);
1546 if (!work) {
1547 pr_err("RIO: no memory for work struct\n");
1548 destroy_workqueue(rio_wq);
1549 goto no_disc;
1550 }
1551
1552 n = 0;
1553 mutex_lock(&rio_mport_list_lock);
1554 list_for_each_entry(port, &rio_mports, node) {
1555 if (port->host_deviceid < 0 && port->nscan) {
1556 work[n].mport = port;
1557 INIT_WORK(&work[n].work, disc_work_handler);
1558 queue_work(rio_wq, &work[n].work);
1559 n++;
1560 }
1561 }
1562 mutex_unlock(&rio_mport_list_lock);
1563
1564 flush_workqueue(rio_wq);
1565 pr_debug("RIO: destroy discovery workqueue\n");
1566 destroy_workqueue(rio_wq);
1567 kfree(work);
1568
1569 no_disc:
1570 rio_init();
1571
1572 return 0;
1573 }
1574
1575 static int hdids[RIO_MAX_MPORTS + 1];
1576
1577 static int rio_get_hdid(int index)
1578 {
1579 if (!hdids[0] || hdids[0] <= index || index >= RIO_MAX_MPORTS)
1580 return -1;
1581
1582 return hdids[index + 1];
1583 }
1584
1585 static int rio_hdid_setup(char *str)
1586 {
1587 (void)get_options(str, ARRAY_SIZE(hdids), hdids);
1588 return 1;
1589 }
1590
1591 __setup("riohdid=", rio_hdid_setup);
1592
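/*
 * Editorial note on the "riohdid=" parameter: get_options() stores the
 * number of parsed values in hdids[0] and the values themselves from
 * hdids[1] onwards, so a boot command line of
 *
 *	riohdid=0,1
 *
 * assigns host device ID 0 to mport 0 and host device ID 1 to mport 1.
 * Mports without an entry get -1 from rio_get_hdid() and are handled as
 * discovering (non-host) ports by rio_init_mports().
 */
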
1593 int rio_register_mport(struct rio_mport *port)
1594 {
1595 if (next_portid >= RIO_MAX_MPORTS) {
1596 pr_err("RIO: reached specified max number of mports\n");
1597 return 1;
1598 }
1599
1600 port->id = next_portid++;
1601 port->host_deviceid = rio_get_hdid(port->id);
1602 port->nscan = NULL;
1603 mutex_lock(&rio_mport_list_lock);
1604 list_add_tail(&port->node, &rio_mports);
1605 mutex_unlock(&rio_mport_list_lock);
1606 return 0;
1607 }
1608
1609 EXPORT_SYMBOL_GPL(rio_local_get_device_id);
1610 EXPORT_SYMBOL_GPL(rio_get_device);
1611 EXPORT_SYMBOL_GPL(rio_get_asm);
1612 EXPORT_SYMBOL_GPL(rio_request_inb_dbell);
1613 EXPORT_SYMBOL_GPL(rio_release_inb_dbell);
1614 EXPORT_SYMBOL_GPL(rio_request_outb_dbell);
1615 EXPORT_SYMBOL_GPL(rio_release_outb_dbell);
1616 EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
1617 EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
1618 EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
1619 EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
1620 EXPORT_SYMBOL_GPL(rio_init_mports);