1 /*
2 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
3 *
4 * Copyright (C) 2000-2002 Lineo
5 * by Stuart Lynne, Tom Rushworth, and Bruce Balden
6 * Copyright (C) 2002 Toshiba Corporation
7 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14 /*
15 * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
16 *
17 * - Endpoint numbering is fixed: ep{1,2,3}-bulk
18 * - Gadget drivers can choose ep maxpacket (8/16/32/64)
19 * - Gadget drivers can choose direction (IN, OUT)
20 * - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
21 */
22
23 #undef DEBUG
24 // #define VERBOSE /* extra debug messages (success too) */
25 // #define USB_TRACE /* packet-level success messages */
26
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/delay.h>
31 #include <linux/ioport.h>
32 #include <linux/slab.h>
33 #include <linux/smp_lock.h>
34 #include <linux/errno.h>
35 #include <linux/init.h>
36 #include <linux/timer.h>
37 #include <linux/list.h>
38 #include <linux/interrupt.h>
39 #include <linux/proc_fs.h>
40 #include <linux/device.h>
41 #include <linux/usb/ch9.h>
42 #include <linux/usb_gadget.h>
43
44 #include <asm/byteorder.h>
45 #include <asm/io.h>
46 #include <asm/irq.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
49
50
51 #include "goku_udc.h"
52
53 #define DRIVER_DESC "TC86C001 USB Device Controller"
54 #define DRIVER_VERSION "30-Oct 2003"
55
56 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
57
58 static const char driver_name [] = "goku_udc";
59 static const char driver_desc [] = DRIVER_DESC;
60
61 MODULE_AUTHOR("source@mvista.com");
62 MODULE_DESCRIPTION(DRIVER_DESC);
63 MODULE_LICENSE("GPL");
64
65
66 /*
67 * IN dma behaves ok under testing, though the IN-dma abort paths don't
68 * seem to behave quite as expected. Used by default.
69 *
70  * OUT dma has documented design problems handling the common "short packet"
71 * transfer termination policy; it couldn't be enabled by default, even
72 * if the OUT-dma abort problems had a resolution.
73 */
74 static unsigned use_dma = 1;
75
76 #if 0
77 //#include <linux/moduleparam.h>
78 /* "modprobe goku_udc use_dma=1" etc
79 * 0 to disable dma
80 * 1 to use IN dma only (normal operation)
81 * 2 to use IN and OUT dma
82 */
83 module_param(use_dma, uint, S_IRUGO);
84 #endif
85
86 /*-------------------------------------------------------------------------*/
87
88 static void nuke(struct goku_ep *, int status);
89
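/* write one endpoint command to the controller's Command register and
 * give the silicon roughly 300us to act on it before it is touched again
 */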
90 static inline void
91 command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
92 {
93 writel(COMMAND_EP(epnum) | command, &regs->Command);
94 udelay(300);
95 }
96
97 static int
98 goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
99 {
100 struct goku_udc *dev;
101 struct goku_ep *ep;
102 u32 mode;
103 u16 max;
104 unsigned long flags;
105
106 ep = container_of(_ep, struct goku_ep, ep);
107 if (!_ep || !desc || ep->desc
108 || desc->bDescriptorType != USB_DT_ENDPOINT)
109 return -EINVAL;
110 dev = ep->dev;
111 if (ep == &dev->ep[0])
112 return -EINVAL;
113 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
114 return -ESHUTDOWN;
115 if (ep->num != (desc->bEndpointAddress & 0x0f))
116 return -EINVAL;
117
118 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
119 case USB_ENDPOINT_XFER_BULK:
120 case USB_ENDPOINT_XFER_INT:
121 break;
122 default:
123 return -EINVAL;
124 }
125
126 if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
127 != EPxSTATUS_EP_INVALID)
128 return -EBUSY;
129
130 /* enabling the no-toggle interrupt mode would need an api hook */
131 mode = 0;
132 max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
133 switch (max) {
134 case 64: mode++;
135 case 32: mode++;
136 case 16: mode++;
137 case 8: mode <<= 3;
138 break;
139 default:
140 return -EINVAL;
141 }
142 mode |= 2 << 1; /* bulk, or intr-with-toggle */
143
144 /* ep1/ep2 dma direction is chosen early; it works in the other
145 * direction, with pio. be cautious with out-dma.
146 */
147 ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
148 if (ep->is_in) {
149 mode |= 1;
150 ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
151 } else {
152 ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
153 if (ep->dma)
154 DBG(dev, "%s out-dma hides short packets\n",
155 ep->ep.name);
156 }
157
158 spin_lock_irqsave(&ep->dev->lock, flags);
159
160 /* ep1 and ep2 can do double buffering and/or dma */
161 if (ep->num < 3) {
162 struct goku_udc_regs __iomem *regs = ep->dev->regs;
163 u32 tmp;
164
165 /* double buffer except (for now) with pio in */
166 tmp = ((ep->dma || !ep->is_in)
167 ? 0x10 /* double buffered */
168 : 0x11 /* single buffer */
169 ) << ep->num;
170 tmp |= readl(&regs->EPxSingle);
171 writel(tmp, &regs->EPxSingle);
172
173 tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
174 tmp |= readl(&regs->EPxBCS);
175 writel(tmp, &regs->EPxBCS);
176 }
177 writel(mode, ep->reg_mode);
178 command(ep->dev->regs, COMMAND_RESET, ep->num);
179 ep->ep.maxpacket = max;
180 ep->stopped = 0;
181 ep->desc = desc;
182 spin_unlock_irqrestore(&ep->dev->lock, flags);
183
184 DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
185 ep->is_in ? "IN" : "OUT",
186 ep->dma ? "dma" : "pio",
187 max);
188
189 return 0;
190 }
191
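/* return an endpoint to its unconfigured state: invalidate it in the
 * hardware (when the register window is available), mask its interrupts,
 * reset its dma channel if it was using one, and clear the software state
 */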
192 static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
193 {
194 struct goku_udc *dev = ep->dev;
195
196 if (regs) {
197 command(regs, COMMAND_INVALID, ep->num);
198 if (ep->num) {
199 if (ep->num == UDC_MSTWR_ENDPOINT)
200 dev->int_enable &= ~(INT_MSTWREND
201 |INT_MSTWRTMOUT);
202 else if (ep->num == UDC_MSTRD_ENDPOINT)
203 dev->int_enable &= ~INT_MSTRDEND;
204 dev->int_enable &= ~INT_EPxDATASET (ep->num);
205 } else
206 dev->int_enable &= ~INT_EP0;
207 writel(dev->int_enable, &regs->int_enable);
208 readl(&regs->int_enable);
209 if (ep->num < 3) {
210 struct goku_udc_regs __iomem *r = ep->dev->regs;
211 u32 tmp;
212
213 tmp = readl(&r->EPxSingle);
214 tmp &= ~(0x11 << ep->num);
215 writel(tmp, &r->EPxSingle);
216
217 tmp = readl(&r->EPxBCS);
218 tmp &= ~(0x11 << ep->num);
219 writel(tmp, &r->EPxBCS);
220 }
221 /* reset dma in case we're still using it */
222 if (ep->dma) {
223 u32 master;
224
225 master = readl(&regs->dma_master) & MST_RW_BITS;
226 if (ep->num == UDC_MSTWR_ENDPOINT) {
227 master &= ~MST_W_BITS;
228 master |= MST_WR_RESET;
229 } else {
230 master &= ~MST_R_BITS;
231 master |= MST_RD_RESET;
232 }
233 writel(master, &regs->dma_master);
234 }
235 }
236
237 ep->ep.maxpacket = MAX_FIFO_SIZE;
238 ep->desc = NULL;
239 ep->stopped = 1;
240 ep->irqs = 0;
241 ep->dma = 0;
242 }
243
244 static int goku_ep_disable(struct usb_ep *_ep)
245 {
246 struct goku_ep *ep;
247 struct goku_udc *dev;
248 unsigned long flags;
249
250 ep = container_of(_ep, struct goku_ep, ep);
251 if (!_ep || !ep->desc)
252 return -ENODEV;
253 dev = ep->dev;
254 if (dev->ep0state == EP0_SUSPEND)
255 return -EBUSY;
256
257 VDBG(dev, "disable %s\n", _ep->name);
258
259 spin_lock_irqsave(&dev->lock, flags);
260 nuke(ep, -ESHUTDOWN);
261 ep_reset(dev->regs, ep);
262 spin_unlock_irqrestore(&dev->lock, flags);
263
264 return 0;
265 }
266
267 /*-------------------------------------------------------------------------*/
268
269 static struct usb_request *
270 goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
271 {
272 struct goku_request *req;
273
274 if (!_ep)
275 return NULL;
276 req = kzalloc(sizeof *req, gfp_flags);
277 if (!req)
278 return NULL;
279
280 req->req.dma = DMA_ADDR_INVALID;
281 INIT_LIST_HEAD(&req->queue);
282 return &req->req;
283 }
284
285 static void
286 goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
287 {
288 struct goku_request *req;
289
290 if (!_ep || !_req)
291 return;
292
293 req = container_of(_req, struct goku_request, req);
294 WARN_ON(!list_empty(&req->queue));
295 kfree(req);
296 }
297
298 /*-------------------------------------------------------------------------*/
299
300 /* allocating buffers this way eliminates dma mapping overhead, which
301 * on some platforms will mean eliminating a per-io buffer copy. with
302 * some kinds of system caches, further tweaks may still be needed.
303 */
304 static void *
305 goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
306 dma_addr_t *dma, gfp_t gfp_flags)
307 {
308 void *retval;
309 struct goku_ep *ep;
310
311 ep = container_of(_ep, struct goku_ep, ep);
312 if (!_ep)
313 return NULL;
314 *dma = DMA_ADDR_INVALID;
315
316 if (ep->dma) {
317 /* the main problem with this call is that it wastes memory
318 * on typical 1/N page allocations: it allocates 1-N pages.
319 */
320 #warning Using dma_alloc_coherent even with buffers smaller than a page.
321 retval = dma_alloc_coherent(&ep->dev->pdev->dev,
322 bytes, dma, gfp_flags);
323 } else
324 retval = kmalloc(bytes, gfp_flags);
325 return retval;
326 }
327
328 static void
329 goku_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
330 {
331 /* free memory into the right allocator */
332 if (dma != DMA_ADDR_INVALID) {
333 struct goku_ep *ep;
334
335 ep = container_of(_ep, struct goku_ep, ep);
336 if (!_ep)
337 return;
338 dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
339 } else
340 kfree (buf);
341 }
342
343 /*-------------------------------------------------------------------------*/
344
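/* unlink a finished request, unmap it if this driver did the mapping,
 * and call its completion handler with dev->lock temporarily dropped
 */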
345 static void
346 done(struct goku_ep *ep, struct goku_request *req, int status)
347 {
348 struct goku_udc *dev;
349 unsigned stopped = ep->stopped;
350
351 list_del_init(&req->queue);
352
353 if (likely(req->req.status == -EINPROGRESS))
354 req->req.status = status;
355 else
356 status = req->req.status;
357
358 dev = ep->dev;
359 if (req->mapped) {
360 pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
361 ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
362 req->req.dma = DMA_ADDR_INVALID;
363 req->mapped = 0;
364 }
365
366 #ifndef USB_TRACE
367 if (status && status != -ESHUTDOWN)
368 #endif
369 VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
370 ep->ep.name, &req->req, status,
371 req->req.actual, req->req.length);
372
373 /* don't modify queue heads during completion callback */
374 ep->stopped = 1;
375 spin_unlock(&dev->lock);
376 req->req.complete(&ep->ep, &req->req);
377 spin_lock(&dev->lock);
378 ep->stopped = stopped;
379 }
380
381 /*-------------------------------------------------------------------------*/
382
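/* load up to one maxpacket of IN data from the request into the fifo,
 * one byte per register write; returns how many bytes were written
 */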
383 static inline int
384 write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
385 {
386 unsigned length, count;
387
388 length = min(req->req.length - req->req.actual, max);
389 req->req.actual += length;
390
391 count = length;
392 while (likely(count--))
393 writel(*buf++, fifo);
394 return length;
395 }
396
397 // return: 0 = still running, 1 = completed, negative = errno
398 static int write_fifo(struct goku_ep *ep, struct goku_request *req)
399 {
400 struct goku_udc *dev = ep->dev;
401 u32 tmp;
402 u8 *buf;
403 unsigned count;
404 int is_last;
405
406 tmp = readl(&dev->regs->DataSet);
407 buf = req->req.buf + req->req.actual;
408 prefetch(buf);
409
410 dev = ep->dev;
411 if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
412 return -EL2HLT;
413
414 /* NOTE: just single-buffered PIO-IN for now. */
415 if (unlikely((tmp & DATASET_A(ep->num)) != 0))
416 return 0;
417
418 /* clear our "packet available" irq */
419 if (ep->num != 0)
420 writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
421
422 count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
423
424 /* last packet often short (sometimes a zlp, especially on ep0) */
425 if (unlikely(count != ep->ep.maxpacket)) {
426 writel(~(1<<ep->num), &dev->regs->EOP);
427 if (ep->num == 0) {
428 dev->ep[0].stopped = 1;
429 dev->ep0state = EP0_STATUS;
430 }
431 is_last = 1;
432 } else {
433 if (likely(req->req.length != req->req.actual)
434 || req->req.zero)
435 is_last = 0;
436 else
437 is_last = 1;
438 }
439 #if 0 /* printk seemed to trash is_last...*/
440 //#ifdef USB_TRACE
441 VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
442 ep->ep.name, count, is_last ? "/last" : "",
443 req->req.length - req->req.actual, req);
444 #endif
445
446 /* requests complete when all IN data is in the FIFO,
447 * or sometimes later, if a zlp was needed.
448 */
449 if (is_last) {
450 done(ep, req, 0);
451 return 1;
452 }
453
454 return 0;
455 }
456
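/* drain received OUT packet(s) from the fifo into the current request;
 * same return convention as write_fifo()
 */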
457 static int read_fifo(struct goku_ep *ep, struct goku_request *req)
458 {
459 struct goku_udc_regs __iomem *regs;
460 u32 size, set;
461 u8 *buf;
462 unsigned bufferspace, is_short, dbuff;
463
464 regs = ep->dev->regs;
465 top:
466 buf = req->req.buf + req->req.actual;
467 prefetchw(buf);
468
469 if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
470 return -EL2HLT;
471
472 dbuff = (ep->num == 1 || ep->num == 2);
473 do {
474 /* ack dataset irq matching the status we'll handle */
475 if (ep->num != 0)
476 writel(~INT_EPxDATASET(ep->num), &regs->int_status);
477
478 set = readl(&regs->DataSet) & DATASET_AB(ep->num);
479 size = readl(&regs->EPxSizeLA[ep->num]);
480 bufferspace = req->req.length - req->req.actual;
481
482 /* usually do nothing without an OUT packet */
483 if (likely(ep->num != 0 || bufferspace != 0)) {
484 if (unlikely(set == 0))
485 break;
486 /* use ep1/ep2 double-buffering for OUT */
487 if (!(size & PACKET_ACTIVE))
488 size = readl(&regs->EPxSizeLB[ep->num]);
489 if (!(size & PACKET_ACTIVE)) // "can't happen"
490 break;
491 size &= DATASIZE; /* EPxSizeH == 0 */
492
493 /* ep0out no-out-data case for set_config, etc */
494 } else
495 size = 0;
496
497 /* read all bytes from this packet */
498 req->req.actual += size;
499 is_short = (size < ep->ep.maxpacket);
500 #ifdef USB_TRACE
501 VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
502 ep->ep.name, size, is_short ? "/S" : "",
503 req, req->req.actual, req->req.length);
504 #endif
505 while (likely(size-- != 0)) {
506 u8 byte = (u8) readl(ep->reg_fifo);
507
508 if (unlikely(bufferspace == 0)) {
509 /* this happens when the driver's buffer
510 * is smaller than what the host sent.
511 * discard the extra data in this packet.
512 */
513 if (req->req.status != -EOVERFLOW)
514 DBG(ep->dev, "%s overflow %u\n",
515 ep->ep.name, size);
516 req->req.status = -EOVERFLOW;
517 } else {
518 *buf++ = byte;
519 bufferspace--;
520 }
521 }
522
523 /* completion */
524 if (unlikely(is_short || req->req.actual == req->req.length)) {
525 if (unlikely(ep->num == 0)) {
526 /* non-control endpoints now usable? */
527 if (ep->dev->req_config)
528 writel(ep->dev->configured
529 ? USBSTATE_CONFIGURED
530 : 0,
531 &regs->UsbState);
532 /* ep0out status stage */
533 writel(~(1<<0), &regs->EOP);
534 ep->stopped = 1;
535 ep->dev->ep0state = EP0_STATUS;
536 }
537 done(ep, req, 0);
538
539 /* empty the second buffer asap */
540 if (dbuff && !list_empty(&ep->queue)) {
541 req = list_entry(ep->queue.next,
542 struct goku_request, queue);
543 goto top;
544 }
545 return 1;
546 }
547 } while (dbuff);
548 return 0;
549 }
550
551 static inline void
552 pio_irq_enable(struct goku_udc *dev,
553 struct goku_udc_regs __iomem *regs, int epnum)
554 {
555 dev->int_enable |= INT_EPxDATASET (epnum);
556 writel(dev->int_enable, &regs->int_enable);
557 /* write may still be posted */
558 }
559
560 static inline void
561 pio_irq_disable(struct goku_udc *dev,
562 struct goku_udc_regs __iomem *regs, int epnum)
563 {
564 dev->int_enable &= ~INT_EPxDATASET (epnum);
565 writel(dev->int_enable, &regs->int_enable);
566 /* write may still be posted */
567 }
568
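/* push the head of the queue through the fifo in whichever direction
 * this endpoint uses; a no-op when nothing is queued
 */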
569 static inline void
570 pio_advance(struct goku_ep *ep)
571 {
572 struct goku_request *req;
573
574 if (unlikely(list_empty (&ep->queue)))
575 return;
576 req = list_entry(ep->queue.next, struct goku_request, queue);
577 (ep->is_in ? write_fifo : read_fifo)(ep, req);
578 }
579
580
581 /*-------------------------------------------------------------------------*/
582
583 // return: 0 = q running, 1 = q stopped, negative = errno
584 static int start_dma(struct goku_ep *ep, struct goku_request *req)
585 {
586 struct goku_udc_regs __iomem *regs = ep->dev->regs;
587 u32 master;
588 u32 start = req->req.dma;
589 u32 end = start + req->req.length - 1;
590
591 master = readl(&regs->dma_master) & MST_RW_BITS;
592
593 /* re-init the bits affecting IN dma; careful with zlps */
594 if (likely(ep->is_in)) {
595 if (unlikely(master & MST_RD_ENA)) {
596 DBG (ep->dev, "start, IN active dma %03x!!\n",
597 master);
598 // return -EL2HLT;
599 }
600 writel(end, &regs->in_dma_end);
601 writel(start, &regs->in_dma_start);
602
603 master &= ~MST_R_BITS;
604 if (unlikely(req->req.length == 0))
605 master = MST_RD_ENA | MST_RD_EOPB;
606 else if ((req->req.length % ep->ep.maxpacket) != 0
607 || req->req.zero)
608 master = MST_RD_ENA | MST_EOPB_ENA;
609 else
610 master = MST_RD_ENA | MST_EOPB_DIS;
611
612 ep->dev->int_enable |= INT_MSTRDEND;
613
614 /* Goku DMA-OUT merges short packets, which plays poorly with
615 * protocols where short packets mark the transfer boundaries.
616 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
617 * ending transfers after 3 SOFs; we don't turn it on.
618 */
619 } else {
620 if (unlikely(master & MST_WR_ENA)) {
621 DBG (ep->dev, "start, OUT active dma %03x!!\n",
622 master);
623 // return -EL2HLT;
624 }
625 writel(end, &regs->out_dma_end);
626 writel(start, &regs->out_dma_start);
627
628 master &= ~MST_W_BITS;
629 master |= MST_WR_ENA | MST_TIMEOUT_DIS;
630
631 ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
632 }
633
634 writel(master, &regs->dma_master);
635 writel(ep->dev->int_enable, &regs->int_enable);
636 return 0;
637 }
638
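/* on dma completion: record the transfer count, complete the request,
 * and start dma for the next queued request (or mask the dma irqs)
 */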
639 static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
640 {
641 struct goku_request *req;
642 struct goku_udc_regs __iomem *regs = ep->dev->regs;
643 u32 master;
644
645 master = readl(&regs->dma_master);
646
647 if (unlikely(list_empty(&ep->queue))) {
648 stop:
649 if (ep->is_in)
650 dev->int_enable &= ~INT_MSTRDEND;
651 else
652 dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
653 writel(dev->int_enable, &regs->int_enable);
654 return;
655 }
656 req = list_entry(ep->queue.next, struct goku_request, queue);
657
658 /* normal hw dma completion (not abort) */
659 if (likely(ep->is_in)) {
660 if (unlikely(master & MST_RD_ENA))
661 return;
662 req->req.actual = readl(&regs->in_dma_current);
663 } else {
664 if (unlikely(master & MST_WR_ENA))
665 return;
666
667 /* hardware merges short packets, and also hides packet
668 * overruns. a partial packet MAY be in the fifo here.
669 */
670 req->req.actual = readl(&regs->out_dma_current);
671 }
672 req->req.actual -= req->req.dma;
673 req->req.actual++;
674
675 #ifdef USB_TRACE
676 VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
677 ep->ep.name, ep->is_in ? "IN" : "OUT",
678 req->req.actual, req->req.length, req);
679 #endif
680 done(ep, req, 0);
681 if (list_empty(&ep->queue))
682 goto stop;
683 req = list_entry(ep->queue.next, struct goku_request, queue);
684 (void) start_dma(ep, req);
685 }
686
687 static void abort_dma(struct goku_ep *ep, int status)
688 {
689 struct goku_udc_regs __iomem *regs = ep->dev->regs;
690 struct goku_request *req;
691 u32 curr, master;
692
693 /* NAK future host requests, hoping the implicit delay lets the
694 * dma engine finish reading (or writing) its latest packet and
695 * empty the dma buffer (up to 16 bytes).
696 *
697 * This avoids needing to clean up a partial packet in the fifo;
698 * we can't do that for IN without side effects to HALT and TOGGLE.
699 */
700 command(regs, COMMAND_FIFO_DISABLE, ep->num);
701 req = list_entry(ep->queue.next, struct goku_request, queue);
702 master = readl(&regs->dma_master) & MST_RW_BITS;
703
704 /* FIXME using these resets isn't usably documented. this may
705 * not work unless it's followed by disabling the endpoint.
706 *
707 * FIXME the OUT reset path doesn't even behave consistently.
708 */
709 if (ep->is_in) {
710 if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
711 goto finished;
712 curr = readl(&regs->in_dma_current);
713
714 writel(curr, &regs->in_dma_end);
715 writel(curr, &regs->in_dma_start);
716
717 master &= ~MST_R_BITS;
718 master |= MST_RD_RESET;
719 writel(master, &regs->dma_master);
720
721 if (readl(&regs->dma_master) & MST_RD_ENA)
722 DBG(ep->dev, "IN dma active after reset!\n");
723
724 } else {
725 if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
726 goto finished;
727 curr = readl(&regs->out_dma_current);
728
729 writel(curr, &regs->out_dma_end);
730 writel(curr, &regs->out_dma_start);
731
732 master &= ~MST_W_BITS;
733 master |= MST_WR_RESET;
734 writel(master, &regs->dma_master);
735
736 if (readl(&regs->dma_master) & MST_WR_ENA)
737 DBG(ep->dev, "OUT dma active after reset!\n");
738 }
739 req->req.actual = (curr - req->req.dma) + 1;
740 req->req.status = status;
741
742 VDBG(ep->dev, "%s %s %s %d/%d\n", __FUNCTION__, ep->ep.name,
743 ep->is_in ? "IN" : "OUT",
744 req->req.actual, req->req.length);
745
746 command(regs, COMMAND_FIFO_ENABLE, ep->num);
747
748 return;
749
750 finished:
751 /* dma already completed; no abort needed */
752 command(regs, COMMAND_FIFO_ENABLE, ep->num);
753 req->req.actual = req->req.length;
754 req->req.status = 0;
755 }
756
757 /*-------------------------------------------------------------------------*/
758
759 static int
760 goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
761 {
762 struct goku_request *req;
763 struct goku_ep *ep;
764 struct goku_udc *dev;
765 unsigned long flags;
766 int status;
767
768 /* always require a cpu-view buffer so pio works */
769 req = container_of(_req, struct goku_request, req);
770 if (unlikely(!_req || !_req->complete
771 || !_req->buf || !list_empty(&req->queue)))
772 return -EINVAL;
773 ep = container_of(_ep, struct goku_ep, ep);
774 if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
775 return -EINVAL;
776 dev = ep->dev;
777 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
778 return -ESHUTDOWN;
779
780 /* can't touch registers when suspended */
781 if (dev->ep0state == EP0_SUSPEND)
782 return -EBUSY;
783
784 /* set up dma mapping in case the caller didn't */
785 if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
786 _req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
787 ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
788 req->mapped = 1;
789 }
790
791 #ifdef USB_TRACE
792 VDBG(dev, "%s queue req %p, len %u buf %p\n",
793 _ep->name, _req, _req->length, _req->buf);
794 #endif
795
796 spin_lock_irqsave(&dev->lock, flags);
797
798 _req->status = -EINPROGRESS;
799 _req->actual = 0;
800
801 /* for ep0 IN without premature status, zlp is required and
802 * writing EOP starts the status stage (OUT).
803 */
804 if (unlikely(ep->num == 0 && ep->is_in))
805 _req->zero = 1;
806
807 /* kickstart this i/o queue? */
808 status = 0;
809 if (list_empty(&ep->queue) && likely(!ep->stopped)) {
810 /* dma: done after dma completion IRQ (or error)
811 * pio: done after last fifo operation
812 */
813 if (ep->dma)
814 status = start_dma(ep, req);
815 else
816 status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
817
818 if (unlikely(status != 0)) {
819 if (status > 0)
820 status = 0;
821 req = NULL;
822 }
823
824 } /* else pio or dma irq handler advances the queue. */
825
826 if (likely(req != 0))
827 list_add_tail(&req->queue, &ep->queue);
828
829 if (likely(!list_empty(&ep->queue))
830 && likely(ep->num != 0)
831 && !ep->dma
832 && !(dev->int_enable & INT_EPxDATASET (ep->num)))
833 pio_irq_enable(dev, dev->regs, ep->num);
834
835 spin_unlock_irqrestore(&dev->lock, flags);
836
837 /* pci writes may still be posted */
838 return status;
839 }
840
841 /* dequeue ALL requests */
842 static void nuke(struct goku_ep *ep, int status)
843 {
844 struct goku_request *req;
845
846 ep->stopped = 1;
847 if (list_empty(&ep->queue))
848 return;
849 if (ep->dma)
850 abort_dma(ep, status);
851 while (!list_empty(&ep->queue)) {
852 req = list_entry(ep->queue.next, struct goku_request, queue);
853 done(ep, req, status);
854 }
855 }
856
857 /* dequeue JUST ONE request */
858 static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
859 {
860 struct goku_request *req;
861 struct goku_ep *ep;
862 struct goku_udc *dev;
863 unsigned long flags;
864
865 ep = container_of(_ep, struct goku_ep, ep);
866 if (!_ep || !_req || (!ep->desc && ep->num != 0))
867 return -EINVAL;
868 dev = ep->dev;
869 if (!dev->driver)
870 return -ESHUTDOWN;
871
872 /* we can't touch (dma) registers when suspended */
873 if (dev->ep0state == EP0_SUSPEND)
874 return -EBUSY;
875
876 VDBG(dev, "%s %s %s %s %p\n", __FUNCTION__, _ep->name,
877 ep->is_in ? "IN" : "OUT",
878 ep->dma ? "dma" : "pio",
879 _req);
880
881 spin_lock_irqsave(&dev->lock, flags);
882
883 /* make sure it's actually queued on this endpoint */
884 list_for_each_entry (req, &ep->queue, queue) {
885 if (&req->req == _req)
886 break;
887 }
888 if (&req->req != _req) {
889 spin_unlock_irqrestore (&dev->lock, flags);
890 return -EINVAL;
891 }
892
893 if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
894 abort_dma(ep, -ECONNRESET);
895 done(ep, req, -ECONNRESET);
896 dma_advance(dev, ep);
897 } else if (!list_empty(&req->queue))
898 done(ep, req, -ECONNRESET);
899 else
900 req = NULL;
901 spin_unlock_irqrestore(&dev->lock, flags);
902
903 return req ? 0 : -EOPNOTSUPP;
904 }
905
906 /*-------------------------------------------------------------------------*/
907
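/* restore DATA0 and clear the stall, then restart whatever i/o was
 * queued while the endpoint was halted
 */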
908 static void goku_clear_halt(struct goku_ep *ep)
909 {
910 // assert (ep->num !=0)
911 VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
912 command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
913 command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
914 if (ep->stopped) {
915 ep->stopped = 0;
916 if (ep->dma) {
917 struct goku_request *req;
918
919 if (list_empty(&ep->queue))
920 return;
921 req = list_entry(ep->queue.next, struct goku_request,
922 queue);
923 (void) start_dma(ep, req);
924 } else
925 pio_advance(ep);
926 }
927 }
928
929 static int goku_set_halt(struct usb_ep *_ep, int value)
930 {
931 struct goku_ep *ep;
932 unsigned long flags;
933 int retval = 0;
934
935 if (!_ep)
936 return -ENODEV;
937 ep = container_of (_ep, struct goku_ep, ep);
938
939 if (ep->num == 0) {
940 if (value) {
941 ep->dev->ep0state = EP0_STALL;
942 ep->dev->ep[0].stopped = 1;
943 } else
944 return -EINVAL;
945
946 /* don't change EPxSTATUS_EP_INVALID to READY */
947 } else if (!ep->desc) {
948 DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
949 return -EINVAL;
950 }
951
952 spin_lock_irqsave(&ep->dev->lock, flags);
953 if (!list_empty(&ep->queue))
954 retval = -EAGAIN;
955 else if (ep->is_in && value
956 /* data in (either) packet buffer? */
957 && (readl(&ep->dev->regs->DataSet)
958 & DATASET_AB(ep->num)))
959 retval = -EAGAIN;
960 else if (!value)
961 goku_clear_halt(ep);
962 else {
963 ep->stopped = 1;
964 VDBG(ep->dev, "%s set halt\n", ep->ep.name);
965 command(ep->dev->regs, COMMAND_STALL, ep->num);
966 readl(ep->reg_status);
967 }
968 spin_unlock_irqrestore(&ep->dev->lock, flags);
969 return retval;
970 }
971
972 static int goku_fifo_status(struct usb_ep *_ep)
973 {
974 struct goku_ep *ep;
975 struct goku_udc_regs __iomem *regs;
976 u32 size;
977
978 if (!_ep)
979 return -ENODEV;
980 ep = container_of(_ep, struct goku_ep, ep);
981
982 /* size is only reported sanely for OUT */
983 if (ep->is_in)
984 return -EOPNOTSUPP;
985
986 /* ignores 16-byte dma buffer; SizeH == 0 */
987 regs = ep->dev->regs;
988 size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
989 size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
990 VDBG(ep->dev, "%s %s %u\n", __FUNCTION__, ep->ep.name, size);
991 return size;
992 }
993
994 static void goku_fifo_flush(struct usb_ep *_ep)
995 {
996 struct goku_ep *ep;
997 struct goku_udc_regs __iomem *regs;
998 u32 size;
999
1000 if (!_ep)
1001 return;
1002 ep = container_of(_ep, struct goku_ep, ep);
1003 VDBG(ep->dev, "%s %s\n", __FUNCTION__, ep->ep.name);
1004
1005 /* don't change EPxSTATUS_EP_INVALID to READY */
1006 if (!ep->desc && ep->num != 0) {
1007 DBG(ep->dev, "%s %s inactive?\n", __FUNCTION__, ep->ep.name);
1008 return;
1009 }
1010
1011 regs = ep->dev->regs;
1012 size = readl(&regs->EPxSizeLA[ep->num]);
1013 size &= DATASIZE;
1014
1015 	/* Undesirable behavior: FIFO_CLEAR also clears the
1016 	 * endpoint halt feature.  For OUT, we _could_ just read
1017 	 * the bytes out (PIO, if !ep->dma); for IN, there's no choice.
1018 */
1019 if (size)
1020 command(regs, COMMAND_FIFO_CLEAR, ep->num);
1021 }
1022
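/* Illustrative sketch only (not part of this driver): a gadget driver
 * reaches these ops through the usb_ep_*() wrappers of this era, roughly:
 *
 *	usb_ep_enable(ep, desc);			// goku_ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);	// goku_alloc_request()
 *	req->buf = usb_ep_alloc_buffer(ep, len,		// goku_alloc_buffer()
 *			&req->dma, GFP_KERNEL);
 *	req->length = len;
 *	req->complete = my_complete;			// hypothetical callback
 *	usb_ep_queue(ep, req, GFP_ATOMIC);		// goku_queue()
 */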
1023 static struct usb_ep_ops goku_ep_ops = {
1024 .enable = goku_ep_enable,
1025 .disable = goku_ep_disable,
1026
1027 .alloc_request = goku_alloc_request,
1028 .free_request = goku_free_request,
1029
1030 .alloc_buffer = goku_alloc_buffer,
1031 .free_buffer = goku_free_buffer,
1032
1033 .queue = goku_queue,
1034 .dequeue = goku_dequeue,
1035
1036 .set_halt = goku_set_halt,
1037 .fifo_status = goku_fifo_status,
1038 .fifo_flush = goku_fifo_flush,
1039 };
1040
1041 /*-------------------------------------------------------------------------*/
1042
1043 static int goku_get_frame(struct usb_gadget *_gadget)
1044 {
1045 return -EOPNOTSUPP;
1046 }
1047
1048 static const struct usb_gadget_ops goku_ops = {
1049 .get_frame = goku_get_frame,
1050 // no remote wakeup
1051 // not selfpowered
1052 };
1053
1054 /*-------------------------------------------------------------------------*/
1055
1056 static inline char *dmastr(void)
1057 {
1058 if (use_dma == 0)
1059 return "(dma disabled)";
1060 else if (use_dma == 2)
1061 return "(dma IN and OUT)";
1062 else
1063 return "(dma IN)";
1064 }
1065
1066 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1067
1068 static const char proc_node_name [] = "driver/udc";
1069
1070 #define FOURBITS "%s%s%s%s"
1071 #define EIGHTBITS FOURBITS FOURBITS
1072
1073 static void
1074 dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
1075 {
1076 int t;
1077
1078 /* int_status is the same format ... */
1079 t = scnprintf(*next, *size,
1080 "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
1081 label, mask,
1082 (mask & INT_PWRDETECT) ? " power" : "",
1083 (mask & INT_SYSERROR) ? " sys" : "",
1084 (mask & INT_MSTRDEND) ? " in-dma" : "",
1085 (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
1086
1087 (mask & INT_MSTWREND) ? " out-dma" : "",
1088 (mask & INT_MSTWRSET) ? " wrset" : "",
1089 (mask & INT_ERR) ? " err" : "",
1090 (mask & INT_SOF) ? " sof" : "",
1091
1092 (mask & INT_EP3NAK) ? " ep3nak" : "",
1093 (mask & INT_EP2NAK) ? " ep2nak" : "",
1094 (mask & INT_EP1NAK) ? " ep1nak" : "",
1095 (mask & INT_EP3DATASET) ? " ep3" : "",
1096
1097 (mask & INT_EP2DATASET) ? " ep2" : "",
1098 (mask & INT_EP1DATASET) ? " ep1" : "",
1099 (mask & INT_STATUSNAK) ? " ep0snak" : "",
1100 (mask & INT_STATUS) ? " ep0status" : "",
1101
1102 (mask & INT_SETUP) ? " setup" : "",
1103 (mask & INT_ENDPOINT0) ? " ep0" : "",
1104 (mask & INT_USBRESET) ? " reset" : "",
1105 (mask & INT_SUSPEND) ? " suspend" : "");
1106 *size -= t;
1107 *next += t;
1108 }
1109
1110
1111 static int
1112 udc_proc_read(char *buffer, char **start, off_t off, int count,
1113 int *eof, void *_dev)
1114 {
1115 char *buf = buffer;
1116 struct goku_udc *dev = _dev;
1117 struct goku_udc_regs __iomem *regs = dev->regs;
1118 char *next = buf;
1119 unsigned size = count;
1120 unsigned long flags;
1121 int i, t, is_usb_connected;
1122 u32 tmp;
1123
1124 if (off != 0)
1125 return 0;
1126
1127 local_irq_save(flags);
1128
1129 /* basic device status */
1130 tmp = readl(&regs->power_detect);
1131 is_usb_connected = tmp & PW_DETECT;
1132 t = scnprintf(next, size,
1133 "%s - %s\n"
1134 "%s version: %s %s\n"
1135 "Gadget driver: %s\n"
1136 "Host %s, %s\n"
1137 "\n",
1138 pci_name(dev->pdev), driver_desc,
1139 driver_name, DRIVER_VERSION, dmastr(),
1140 dev->driver ? dev->driver->driver.name : "(none)",
1141 is_usb_connected
1142 ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
1143 : "disconnected",
1144 ({char *tmp;
1145 switch(dev->ep0state){
1146 case EP0_DISCONNECT: tmp = "ep0_disconnect"; break;
1147 case EP0_IDLE: tmp = "ep0_idle"; break;
1148 case EP0_IN: tmp = "ep0_in"; break;
1149 case EP0_OUT: tmp = "ep0_out"; break;
1150 case EP0_STATUS: tmp = "ep0_status"; break;
1151 case EP0_STALL: tmp = "ep0_stall"; break;
1152 case EP0_SUSPEND: tmp = "ep0_suspend"; break;
1153 default: tmp = "ep0_?"; break;
1154 } tmp; })
1155 );
1156 size -= t;
1157 next += t;
1158
1159 dump_intmask("int_status", readl(&regs->int_status), &next, &size);
1160 dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);
1161
1162 if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
1163 goto done;
1164
1165 /* registers for (active) device and ep0 */
1166 t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
1167 "single.bcs %02x.%02x state %x addr %u\n",
1168 dev->irqs, readl(&regs->DataSet),
1169 readl(&regs->EPxSingle), readl(&regs->EPxBCS),
1170 readl(&regs->UsbState),
1171 readl(&regs->address));
1172 size -= t;
1173 next += t;
1174
1175 tmp = readl(&regs->dma_master);
1176 t = scnprintf(next, size,
1177 "dma %03X =" EIGHTBITS "%s %s\n", tmp,
1178 (tmp & MST_EOPB_DIS) ? " eopb-" : "",
1179 (tmp & MST_EOPB_ENA) ? " eopb+" : "",
1180 (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
1181 (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
1182
1183 (tmp & MST_RD_EOPB) ? " eopb" : "",
1184 (tmp & MST_RD_RESET) ? " in_reset" : "",
1185 (tmp & MST_WR_RESET) ? " out_reset" : "",
1186 (tmp & MST_RD_ENA) ? " IN" : "",
1187
1188 (tmp & MST_WR_ENA) ? " OUT" : "",
1189 (tmp & MST_CONNECTION)
1190 ? "ep1in/ep2out"
1191 : "ep1out/ep2in");
1192 size -= t;
1193 next += t;
1194
1195 /* dump endpoint queues */
1196 for (i = 0; i < 4; i++) {
1197 struct goku_ep *ep = &dev->ep [i];
1198 struct goku_request *req;
1199 int t;
1200
1201 if (i && !ep->desc)
1202 continue;
1203
1204 tmp = readl(ep->reg_status);
1205 t = scnprintf(next, size,
1206 "%s %s max %u %s, irqs %lu, "
1207 "status %02x (%s) " FOURBITS "\n",
1208 ep->ep.name,
1209 ep->is_in ? "in" : "out",
1210 ep->ep.maxpacket,
1211 ep->dma ? "dma" : "pio",
1212 ep->irqs,
1213 tmp, ({ char *s;
1214 switch (tmp & EPxSTATUS_EP_MASK) {
1215 case EPxSTATUS_EP_READY:
1216 s = "ready"; break;
1217 case EPxSTATUS_EP_DATAIN:
1218 s = "packet"; break;
1219 case EPxSTATUS_EP_FULL:
1220 s = "full"; break;
1221 case EPxSTATUS_EP_TX_ERR: // host will retry
1222 s = "tx_err"; break;
1223 case EPxSTATUS_EP_RX_ERR:
1224 s = "rx_err"; break;
1225 case EPxSTATUS_EP_BUSY: /* ep0 only */
1226 s = "busy"; break;
1227 case EPxSTATUS_EP_STALL:
1228 s = "stall"; break;
1229 case EPxSTATUS_EP_INVALID: // these "can't happen"
1230 s = "invalid"; break;
1231 default:
1232 s = "?"; break;
1233 }; s; }),
1234 (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
1235 (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
1236 (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
1237 (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
1238 );
1239 if (t <= 0 || t > size)
1240 goto done;
1241 size -= t;
1242 next += t;
1243
1244 if (list_empty(&ep->queue)) {
1245 t = scnprintf(next, size, "\t(nothing queued)\n");
1246 if (t <= 0 || t > size)
1247 goto done;
1248 size -= t;
1249 next += t;
1250 continue;
1251 }
1252 list_for_each_entry(req, &ep->queue, queue) {
1253 if (ep->dma && req->queue.prev == &ep->queue) {
1254 if (i == UDC_MSTRD_ENDPOINT)
1255 tmp = readl(&regs->in_dma_current);
1256 else
1257 tmp = readl(&regs->out_dma_current);
1258 tmp -= req->req.dma;
1259 tmp++;
1260 } else
1261 tmp = req->req.actual;
1262
1263 t = scnprintf(next, size,
1264 "\treq %p len %u/%u buf %p\n",
1265 &req->req, tmp, req->req.length,
1266 req->req.buf);
1267 if (t <= 0 || t > size)
1268 goto done;
1269 size -= t;
1270 next += t;
1271 }
1272 }
1273
1274 done:
1275 local_irq_restore(flags);
1276 *eof = 1;
1277 return count - size;
1278 }
1279
1280 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1281
1282 /*-------------------------------------------------------------------------*/
1283
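/* rebuild the software state for ep0..ep3; ep0 is special-cased with
 * no mode register, a smaller maxpacket, and no entry on ep_list
 */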
1284 static void udc_reinit (struct goku_udc *dev)
1285 {
1286 static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
1287
1288 unsigned i;
1289
1290 INIT_LIST_HEAD (&dev->gadget.ep_list);
1291 dev->gadget.ep0 = &dev->ep [0].ep;
1292 dev->gadget.speed = USB_SPEED_UNKNOWN;
1293 dev->ep0state = EP0_DISCONNECT;
1294 dev->irqs = 0;
1295
1296 for (i = 0; i < 4; i++) {
1297 struct goku_ep *ep = &dev->ep[i];
1298
1299 ep->num = i;
1300 ep->ep.name = names[i];
1301 ep->reg_fifo = &dev->regs->ep_fifo [i];
1302 ep->reg_status = &dev->regs->ep_status [i];
1303 ep->reg_mode = &dev->regs->ep_mode[i];
1304
1305 ep->ep.ops = &goku_ep_ops;
1306 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1307 ep->dev = dev;
1308 INIT_LIST_HEAD (&ep->queue);
1309
1310 ep_reset(NULL, ep);
1311 }
1312
1313 dev->ep[0].reg_mode = NULL;
1314 dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
1315 list_del_init (&dev->ep[0].ep.ep_list);
1316 }
1317
1318 static void udc_reset(struct goku_udc *dev)
1319 {
1320 struct goku_udc_regs __iomem *regs = dev->regs;
1321
1322 writel(0, &regs->power_detect);
1323 writel(0, &regs->int_enable);
1324 readl(&regs->int_enable);
1325 dev->int_enable = 0;
1326
1327 /* deassert reset, leave USB D+ at hi-Z (no pullup)
1328 * don't let INT_PWRDETECT sequence begin
1329 */
1330 udelay(250);
1331 writel(PW_RESETB, &regs->power_detect);
1332 readl(&regs->int_enable);
1333 }
1334
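/* reset and reinit, then enable the D+ pullup plus the device-wide and
 * ep0 interrupts so enumeration can start once the host drops reset
 */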
1335 static void ep0_start(struct goku_udc *dev)
1336 {
1337 struct goku_udc_regs __iomem *regs = dev->regs;
1338 unsigned i;
1339
1340 VDBG(dev, "%s\n", __FUNCTION__);
1341
1342 udc_reset(dev);
1343 udc_reinit (dev);
1344 //writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
1345
1346 /* hw handles set_address, set_feature, get_status; maybe more */
1347 writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
1348 | G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
1349 | G_REQMODE_GET_DESC
1350 | G_REQMODE_CLEAR_FEAT
1351 , &regs->reqmode);
1352
1353 for (i = 0; i < 4; i++)
1354 dev->ep[i].irqs = 0;
1355
1356 /* can't modify descriptors after writing UsbReady */
1357 for (i = 0; i < DESC_LEN; i++)
1358 writel(0, &regs->descriptors[i]);
1359 writel(0, &regs->UsbReady);
1360
1361 /* expect ep0 requests when the host drops reset */
1362 writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
1363 dev->int_enable = INT_DEVWIDE | INT_EP0;
1364 writel(dev->int_enable, &dev->regs->int_enable);
1365 readl(&regs->int_enable);
1366 dev->gadget.speed = USB_SPEED_FULL;
1367 dev->ep0state = EP0_IDLE;
1368 }
1369
1370 static void udc_enable(struct goku_udc *dev)
1371 {
1372 /* start enumeration now, or after power detect irq */
1373 if (readl(&dev->regs->power_detect) & PW_DETECT)
1374 ep0_start(dev);
1375 else {
1376 DBG(dev, "%s\n", __FUNCTION__);
1377 dev->int_enable = INT_PWRDETECT;
1378 writel(dev->int_enable, &dev->regs->int_enable);
1379 }
1380 }
1381
1382 /*-------------------------------------------------------------------------*/
1383
1384 /* keeping it simple:
1385 * - one bus driver, initted first;
1386 * - one function driver, initted second
1387 */
1388
1389 static struct goku_udc *the_controller;
1390
1391 /* when a driver is successfully registered, it will receive
1392 * control requests including set_configuration(), which enables
1393 * non-control requests. then usb traffic follows until a
1394 * disconnect is reported. then a host may connect again, or
1395 * the driver might get unbound.
1396 */
1397 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1398 {
1399 struct goku_udc *dev = the_controller;
1400 int retval;
1401
1402 if (!driver
1403 || driver->speed != USB_SPEED_FULL
1404 || !driver->bind
1405 || !driver->disconnect
1406 || !driver->setup)
1407 return -EINVAL;
1408 if (!dev)
1409 return -ENODEV;
1410 if (dev->driver)
1411 return -EBUSY;
1412
1413 /* hook up the driver */
1414 driver->driver.bus = NULL;
1415 dev->driver = driver;
1416 dev->gadget.dev.driver = &driver->driver;
1417 retval = driver->bind(&dev->gadget);
1418 if (retval) {
1419 DBG(dev, "bind to driver %s --> error %d\n",
1420 driver->driver.name, retval);
1421 dev->driver = NULL;
1422 dev->gadget.dev.driver = NULL;
1423 return retval;
1424 }
1425
1426 /* then enable host detection and ep0; and we're ready
1427 * for set_configuration as well as eventual disconnect.
1428 */
1429 udc_enable(dev);
1430
1431 DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
1432 return 0;
1433 }
1434 EXPORT_SYMBOL(usb_gadget_register_driver);
1435
1436 static void
1437 stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
1438 {
1439 unsigned i;
1440
1441 DBG (dev, "%s\n", __FUNCTION__);
1442
1443 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1444 driver = NULL;
1445
1446 	/* disconnect gadget driver after quiescing hw and the driver */
1447 udc_reset (dev);
1448 for (i = 0; i < 4; i++)
1449 nuke(&dev->ep [i], -ESHUTDOWN);
1450 if (driver) {
1451 spin_unlock(&dev->lock);
1452 driver->disconnect(&dev->gadget);
1453 spin_lock(&dev->lock);
1454 }
1455
1456 if (dev->driver)
1457 udc_enable(dev);
1458 }
1459
1460 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1461 {
1462 struct goku_udc *dev = the_controller;
1463 unsigned long flags;
1464
1465 if (!dev)
1466 return -ENODEV;
1467 if (!driver || driver != dev->driver || !driver->unbind)
1468 return -EINVAL;
1469
1470 spin_lock_irqsave(&dev->lock, flags);
1471 dev->driver = NULL;
1472 stop_activity(dev, driver);
1473 spin_unlock_irqrestore(&dev->lock, flags);
1474
1475 driver->unbind(&dev->gadget);
1476
1477 DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
1478 return 0;
1479 }
1480 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1481
1482
1483 /*-------------------------------------------------------------------------*/
1484
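/* decode the SETUP packet latched by the hardware, handle the requests
 * this driver implements itself (notably CLEAR_FEATURE and the
 * SET_CONFIGURATION bookkeeping), and delegate the rest to the gadget
 * driver's setup() callback
 */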
1485 static void ep0_setup(struct goku_udc *dev)
1486 {
1487 struct goku_udc_regs __iomem *regs = dev->regs;
1488 struct usb_ctrlrequest ctrl;
1489 int tmp;
1490
1491 /* read SETUP packet and enter DATA stage */
1492 ctrl.bRequestType = readl(&regs->bRequestType);
1493 ctrl.bRequest = readl(&regs->bRequest);
1494 ctrl.wValue = cpu_to_le16((readl(&regs->wValueH) << 8)
1495 | readl(&regs->wValueL));
1496 ctrl.wIndex = cpu_to_le16((readl(&regs->wIndexH) << 8)
1497 | readl(&regs->wIndexL));
1498 ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
1499 | readl(&regs->wLengthL));
1500 writel(0, &regs->SetupRecv);
1501
1502 nuke(&dev->ep[0], 0);
1503 dev->ep[0].stopped = 0;
1504 if (likely(ctrl.bRequestType & USB_DIR_IN)) {
1505 dev->ep[0].is_in = 1;
1506 dev->ep0state = EP0_IN;
1507 /* detect early status stages */
1508 writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
1509 } else {
1510 dev->ep[0].is_in = 0;
1511 dev->ep0state = EP0_OUT;
1512
1513 /* NOTE: CLEAR_FEATURE is done in software so that we can
1514 * synchronize transfer restarts after bulk IN stalls. data
1515 * won't even enter the fifo until the halt is cleared.
1516 */
1517 switch (ctrl.bRequest) {
1518 case USB_REQ_CLEAR_FEATURE:
1519 switch (ctrl.bRequestType) {
1520 case USB_RECIP_ENDPOINT:
1521 tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
1522 /* active endpoint */
1523 if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
1524 goto stall;
1525 if (ctrl.wIndex & __constant_cpu_to_le16(
1526 USB_DIR_IN)) {
1527 if (!dev->ep[tmp].is_in)
1528 goto stall;
1529 } else {
1530 if (dev->ep[tmp].is_in)
1531 goto stall;
1532 }
1533 if (ctrl.wValue != __constant_cpu_to_le16(
1534 USB_ENDPOINT_HALT))
1535 goto stall;
1536 if (tmp)
1537 goku_clear_halt(&dev->ep[tmp]);
1538 succeed:
1539 /* start ep0out status stage */
1540 writel(~(1<<0), &regs->EOP);
1541 dev->ep[0].stopped = 1;
1542 dev->ep0state = EP0_STATUS;
1543 return;
1544 case USB_RECIP_DEVICE:
1545 /* device remote wakeup: always clear */
1546 if (ctrl.wValue != __constant_cpu_to_le16(1))
1547 goto stall;
1548 VDBG(dev, "clear dev remote wakeup\n");
1549 goto succeed;
1550 case USB_RECIP_INTERFACE:
1551 goto stall;
1552 default: /* pass to gadget driver */
1553 break;
1554 }
1555 break;
1556 default:
1557 break;
1558 }
1559 }
1560
1561 #ifdef USB_TRACE
1562 VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1563 ctrl.bRequestType, ctrl.bRequest,
1564 le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
1565 le16_to_cpu(ctrl.wLength));
1566 #endif
1567
1568 /* hw wants to know when we're configured (or not) */
1569 dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
1570 && ctrl.bRequestType == USB_RECIP_DEVICE);
1571 if (unlikely(dev->req_config))
1572 dev->configured = (ctrl.wValue != __constant_cpu_to_le16(0));
1573
1574 /* delegate everything to the gadget driver.
1575 * it may respond after this irq handler returns.
1576 */
1577 spin_unlock (&dev->lock);
1578 tmp = dev->driver->setup(&dev->gadget, &ctrl);
1579 spin_lock (&dev->lock);
1580 if (unlikely(tmp < 0)) {
1581 stall:
1582 #ifdef USB_TRACE
1583 VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
1584 ctrl.bRequestType, ctrl.bRequest, tmp);
1585 #endif
1586 command(regs, COMMAND_STALL, 0);
1587 dev->ep[0].stopped = 1;
1588 dev->ep0state = EP0_STALL;
1589 }
1590
1591 /* expect at least one data or status stage irq */
1592 }
1593
1594 #define ACK(irqbit) { \
1595 stat &= ~irqbit; \
1596 writel(~irqbit, &regs->int_status); \
1597 handled = 1; \
1598 }
1599
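/* one handler covers device-wide events, ep0 stages, dma completions,
 * and pio dataset irqs; int_status is rescanned a few times to catch
 * events that arrive while earlier ones are being serviced
 */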
1600 static irqreturn_t goku_irq(int irq, void *_dev)
1601 {
1602 struct goku_udc *dev = _dev;
1603 struct goku_udc_regs __iomem *regs = dev->regs;
1604 struct goku_ep *ep;
1605 u32 stat, handled = 0;
1606 unsigned i, rescans = 5;
1607
1608 spin_lock(&dev->lock);
1609
1610 rescan:
1611 stat = readl(&regs->int_status) & dev->int_enable;
1612 if (!stat)
1613 goto done;
1614 dev->irqs++;
1615
1616 /* device-wide irqs */
1617 if (unlikely(stat & INT_DEVWIDE)) {
1618 if (stat & INT_SYSERROR) {
1619 ERROR(dev, "system error\n");
1620 stop_activity(dev, dev->driver);
1621 stat = 0;
1622 handled = 1;
1623 // FIXME have a neater way to prevent re-enumeration
1624 dev->driver = NULL;
1625 goto done;
1626 }
1627 if (stat & INT_PWRDETECT) {
1628 writel(~stat, &regs->int_status);
1629 if (readl(&dev->regs->power_detect) & PW_DETECT) {
1630 VDBG(dev, "connect\n");
1631 ep0_start(dev);
1632 } else {
1633 DBG(dev, "disconnect\n");
1634 if (dev->gadget.speed == USB_SPEED_FULL)
1635 stop_activity(dev, dev->driver);
1636 dev->ep0state = EP0_DISCONNECT;
1637 dev->int_enable = INT_DEVWIDE;
1638 writel(dev->int_enable, &dev->regs->int_enable);
1639 }
1640 stat = 0;
1641 handled = 1;
1642 goto done;
1643 }
1644 if (stat & INT_SUSPEND) {
1645 ACK(INT_SUSPEND);
1646 if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
1647 switch (dev->ep0state) {
1648 case EP0_DISCONNECT:
1649 case EP0_SUSPEND:
1650 goto pm_next;
1651 default:
1652 break;
1653 }
1654 DBG(dev, "USB suspend\n");
1655 dev->ep0state = EP0_SUSPEND;
1656 if (dev->gadget.speed != USB_SPEED_UNKNOWN
1657 && dev->driver
1658 && dev->driver->suspend) {
1659 spin_unlock(&dev->lock);
1660 dev->driver->suspend(&dev->gadget);
1661 spin_lock(&dev->lock);
1662 }
1663 } else {
1664 if (dev->ep0state != EP0_SUSPEND) {
1665 DBG(dev, "bogus USB resume %d\n",
1666 dev->ep0state);
1667 goto pm_next;
1668 }
1669 DBG(dev, "USB resume\n");
1670 dev->ep0state = EP0_IDLE;
1671 if (dev->gadget.speed != USB_SPEED_UNKNOWN
1672 && dev->driver
1673 && dev->driver->resume) {
1674 spin_unlock(&dev->lock);
1675 dev->driver->resume(&dev->gadget);
1676 spin_lock(&dev->lock);
1677 }
1678 }
1679 }
1680 pm_next:
1681 if (stat & INT_USBRESET) { /* hub reset done */
1682 ACK(INT_USBRESET);
1683 INFO(dev, "USB reset done, gadget %s\n",
1684 dev->driver->driver.name);
1685 }
1686 // and INT_ERR on some endpoint's crc/bitstuff/... problem
1687 }
1688
1689 /* progress ep0 setup, data, or status stages.
1690 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
1691 */
1692 if (stat & INT_SETUP) {
1693 ACK(INT_SETUP);
1694 dev->ep[0].irqs++;
1695 ep0_setup(dev);
1696 }
1697 if (stat & INT_STATUSNAK) {
1698 ACK(INT_STATUSNAK|INT_ENDPOINT0);
1699 if (dev->ep0state == EP0_IN) {
1700 ep = &dev->ep[0];
1701 ep->irqs++;
1702 nuke(ep, 0);
1703 writel(~(1<<0), &regs->EOP);
1704 dev->ep0state = EP0_STATUS;
1705 }
1706 }
1707 if (stat & INT_ENDPOINT0) {
1708 ACK(INT_ENDPOINT0);
1709 ep = &dev->ep[0];
1710 ep->irqs++;
1711 pio_advance(ep);
1712 }
1713
1714 /* dma completion */
1715 if (stat & INT_MSTRDEND) { /* IN */
1716 ACK(INT_MSTRDEND);
1717 ep = &dev->ep[UDC_MSTRD_ENDPOINT];
1718 ep->irqs++;
1719 dma_advance(dev, ep);
1720 }
1721 if (stat & INT_MSTWREND) { /* OUT */
1722 ACK(INT_MSTWREND);
1723 ep = &dev->ep[UDC_MSTWR_ENDPOINT];
1724 ep->irqs++;
1725 dma_advance(dev, ep);
1726 }
1727 if (stat & INT_MSTWRTMOUT) { /* OUT */
1728 ACK(INT_MSTWRTMOUT);
1729 ep = &dev->ep[UDC_MSTWR_ENDPOINT];
1730 ep->irqs++;
1731 ERROR(dev, "%s write timeout ?\n", ep->ep.name);
1732 // reset dma? then dma_advance()
1733 }
1734
1735 /* pio */
1736 for (i = 1; i < 4; i++) {
1737 u32 tmp = INT_EPxDATASET(i);
1738
1739 if (!(stat & tmp))
1740 continue;
1741 ep = &dev->ep[i];
1742 pio_advance(ep);
1743 if (list_empty (&ep->queue))
1744 pio_irq_disable(dev, regs, i);
1745 stat &= ~tmp;
1746 handled = 1;
1747 ep->irqs++;
1748 }
1749
1750 if (rescans--)
1751 goto rescan;
1752
1753 done:
1754 (void)readl(&regs->int_enable);
1755 spin_unlock(&dev->lock);
1756 if (stat)
1757 DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
1758 readl(&regs->int_status), dev->int_enable);
1759 return IRQ_RETVAL(handled);
1760 }
1761
1762 #undef ACK
1763
1764 /*-------------------------------------------------------------------------*/
1765
1766 static void gadget_release(struct device *_dev)
1767 {
1768 struct goku_udc *dev = dev_get_drvdata(_dev);
1769
1770 kfree(dev);
1771 }
1772
1773 /* tear down the binding between this driver and the pci device */
1774
1775 static void goku_remove(struct pci_dev *pdev)
1776 {
1777 struct goku_udc *dev = pci_get_drvdata(pdev);
1778
1779 DBG(dev, "%s\n", __FUNCTION__);
1780
1781 BUG_ON(dev->driver);
1782
1783 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1784 remove_proc_entry(proc_node_name, NULL);
1785 #endif
1786 if (dev->regs)
1787 udc_reset(dev);
1788 if (dev->got_irq)
1789 free_irq(pdev->irq, dev);
1790 if (dev->regs)
1791 iounmap(dev->regs);
1792 if (dev->got_region)
1793 release_mem_region(pci_resource_start (pdev, 0),
1794 pci_resource_len (pdev, 0));
1795 if (dev->enabled)
1796 pci_disable_device(pdev);
1797 device_unregister(&dev->gadget.dev);
1798
1799 pci_set_drvdata(pdev, NULL);
1800 dev->regs = NULL;
1801 the_controller = NULL;
1802
1803 INFO(dev, "unbind\n");
1804 }
1805
1806 /* wrap this driver around the specified pci device, but
1807 * don't respond over USB until a gadget driver binds to us.
1808 */
1809
1810 static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1811 {
1812 struct goku_udc *dev = NULL;
1813 unsigned long resource, len;
1814 void __iomem *base = NULL;
1815 int retval;
1816
1817 /* if you want to support more than one controller in a system,
1818 	 * usb_gadget_{register,unregister}_driver() must change.
1819 */
1820 if (the_controller) {
1821 WARN(dev, "ignoring %s\n", pci_name(pdev));
1822 return -EBUSY;
1823 }
1824 if (!pdev->irq) {
1825 printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
1826 retval = -ENODEV;
1827 goto done;
1828 }
1829
1830 /* alloc, and start init */
1831 dev = kmalloc (sizeof *dev, GFP_KERNEL);
1832 if (dev == NULL){
1833 pr_debug("enomem %s\n", pci_name(pdev));
1834 retval = -ENOMEM;
1835 goto done;
1836 }
1837
1838 memset(dev, 0, sizeof *dev);
1839 spin_lock_init(&dev->lock);
1840 dev->pdev = pdev;
1841 dev->gadget.ops = &goku_ops;
1842
1843 /* the "gadget" abstracts/virtualizes the controller */
1844 strcpy(dev->gadget.dev.bus_id, "gadget");
1845 dev->gadget.dev.parent = &pdev->dev;
1846 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
1847 dev->gadget.dev.release = gadget_release;
1848 dev->gadget.name = driver_name;
1849
1850 /* now all the pci goodies ... */
1851 retval = pci_enable_device(pdev);
1852 if (retval < 0) {
1853 DBG(dev, "can't enable, %d\n", retval);
1854 goto done;
1855 }
1856 dev->enabled = 1;
1857
1858 resource = pci_resource_start(pdev, 0);
1859 len = pci_resource_len(pdev, 0);
1860 if (!request_mem_region(resource, len, driver_name)) {
1861 DBG(dev, "controller already in use\n");
1862 retval = -EBUSY;
1863 goto done;
1864 }
1865 dev->got_region = 1;
1866
1867 base = ioremap_nocache(resource, len);
1868 if (base == NULL) {
1869 DBG(dev, "can't map memory\n");
1870 retval = -EFAULT;
1871 goto done;
1872 }
1873 dev->regs = (struct goku_udc_regs __iomem *) base;
1874
1875 pci_set_drvdata(pdev, dev);
1876 INFO(dev, "%s\n", driver_desc);
1877 INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
1878 INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
1879
1880 	/* init to a known state, then set up irqs */
1881 udc_reset(dev);
1882 udc_reinit (dev);
1883 if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
1884 driver_name, dev) != 0) {
1885 DBG(dev, "request interrupt %d failed\n", pdev->irq);
1886 retval = -EBUSY;
1887 goto done;
1888 }
1889 dev->got_irq = 1;
1890 if (use_dma)
1891 pci_set_master(pdev);
1892
1893
1894 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1895 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
1896 #endif
1897
1898 /* done */
1899 the_controller = dev;
1900 device_register(&dev->gadget.dev);
1901
1902 return 0;
1903
1904 done:
1905 if (dev)
1906 goku_remove (pdev);
1907 return retval;
1908 }
1909
1910
1911 /*-------------------------------------------------------------------------*/
1912
1913 static struct pci_device_id pci_ids [] = { {
1914 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
1915 .class_mask = ~0,
1916 .vendor = 0x102f, /* Toshiba */
1917 .device = 0x0107, /* this UDC */
1918 .subvendor = PCI_ANY_ID,
1919 .subdevice = PCI_ANY_ID,
1920
1921 }, { /* end: all zeroes */ }
1922 };
1923 MODULE_DEVICE_TABLE (pci, pci_ids);
1924
1925 static struct pci_driver goku_pci_driver = {
1926 .name = (char *) driver_name,
1927 .id_table = pci_ids,
1928
1929 .probe = goku_probe,
1930 .remove = goku_remove,
1931
1932 /* FIXME add power management support */
1933 };
1934
1935 static int __init init (void)
1936 {
1937 return pci_register_driver (&goku_pci_driver);
1938 }
1939 module_init (init);
1940
1941 static void __exit cleanup (void)
1942 {
1943 pci_unregister_driver (&goku_pci_driver);
1944 }
1945 module_exit (cleanup);