1 /*
2 * BRIEF MODULE DESCRIPTION
3 * Au1000 USB Device-Side (device layer)
4 *
5 * Copyright 2001-2002 MontaVista Software Inc.
6 * Author: MontaVista Software, Inc.
7 * stevel@mvista.com or source@mvista.com
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
20 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29 #include <linux/kernel.h>
30 #include <linux/ioport.h>
31 #include <linux/sched.h>
32 #include <linux/signal.h>
33 #include <linux/errno.h>
34 #include <linux/poll.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/fcntl.h>
38 #include <linux/module.h>
39 #include <linux/spinlock.h>
40 #include <linux/list.h>
41 #include <linux/smp_lock.h>
42 #define DEBUG
43 #include <linux/usb.h>
44
45 #include <asm/io.h>
46 #include <asm/uaccess.h>
47 #include <asm/irq.h>
48 #include <asm/mipsregs.h>
49 #include <asm/au1000.h>
50 #include <asm/au1000_dma.h>
51 #include <asm/au1000_usbdev.h>
52
53 #ifdef DEBUG
54 #undef VDEBUG
55 #ifdef VDEBUG
56 #define vdbg(fmt, arg...) printk(KERN_DEBUG __FILE__ ": " fmt "\n" , ## arg)
57 #else
58 #define vdbg(fmt, arg...) do {} while (0)
59 #endif
60 #else
61 #define vdbg(fmt, arg...) do {} while (0)
62 #endif
63
64 #define ALLOC_FLAGS (in_interrupt () ? GFP_ATOMIC : GFP_KERNEL)
65
66 #define EP_FIFO_DEPTH 8
67
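/*
 * Stage tracking for the default control endpoint. A USB control
 * transfer is a SETUP packet, an optional DATA stage, and a STATUS
 * handshake; dev->ep0_stage records which of these is expected next so
 * process_ep0_receive() can interpret incoming EP0 packets.
 */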
68 typedef enum {
69 SETUP_STAGE = 0,
70 DATA_STAGE,
71 STATUS_STAGE
72 } ep0_stage_t;
73
74 typedef struct {
75 int read_fifo;
76 int write_fifo;
77 int ctrl_stat;
78 int read_fifo_status;
79 int write_fifo_status;
80 } endpoint_reg_t;
81
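/*
 * Simple singly-linked packet queue. Each endpoint keeps an inlist of
 * packets queued for transmission and an outlist of buffers being
 * filled by (or already received from) the host.
 */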
82 typedef struct {
83 usbdev_pkt_t *head;
84 usbdev_pkt_t *tail;
85 int count;
86 } pkt_list_t;
87
88 typedef struct {
89 int active;
90 struct usb_endpoint_descriptor *desc;
91 endpoint_reg_t *reg;
92 /* Only one of these is used, unless this is the control ep */
93 pkt_list_t inlist;
94 pkt_list_t outlist;
95 unsigned int indma, outdma; /* DMA channel numbers for IN, OUT */
96 /* following are extracted from endpoint descriptor for easy access */
97 int max_pkt_size;
98 int type;
99 int direction;
100 /* WE assign endpoint addresses! */
101 int address;
102 spinlock_t lock;
103 } endpoint_t;
104
105
106 static struct usb_dev {
107 endpoint_t ep[6];
108 ep0_stage_t ep0_stage;
109
110 struct usb_device_descriptor * dev_desc;
111 struct usb_interface_descriptor* if_desc;
112 struct usb_config_descriptor * conf_desc;
113 u8 * full_conf_desc;
114 struct usb_string_descriptor * str_desc[6];
115
116 /* callback to function layer */
117 void (*func_cb)(usbdev_cb_type_t type, unsigned long arg,
118 void *cb_data);
119 void* cb_data;
120
121 usbdev_state_t state; // device state
122 int suspended; // suspended flag
123 int address; // device address
124 int interface;
125 int num_ep;
126 u8 alternate_setting;
127 u8 configuration; // configuration value
128 int remote_wakeup_en;
129 } usbdev;
130
131
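/*
 * Per-endpoint register map. The Au1000 device controller has a fixed
 * FIFO layout: FIFOs 0/1 serve the default control endpoint, FIFOs 2/3
 * are IN-only, and FIFOs 4/5 are OUT-only. Entry 1 is a placeholder so
 * the table can be indexed by endpoint address, and -1 marks registers
 * that do not exist for a given direction.
 */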
132 static endpoint_reg_t ep_reg[] = {
133 // FIFO's 0 and 1 are EP0 default control
134 {USBD_EP0RD, USBD_EP0WR, USBD_EP0CS, USBD_EP0RDSTAT, USBD_EP0WRSTAT },
135 {0},
136 // FIFO 2 is EP2, IN
137 { -1, USBD_EP2WR, USBD_EP2CS, -1, USBD_EP2WRSTAT },
138 // FIFO 3 is EP3, IN
139 { -1, USBD_EP3WR, USBD_EP3CS, -1, USBD_EP3WRSTAT },
140 // FIFO 4 is EP4, OUT
141 {USBD_EP4RD, -1, USBD_EP4CS, USBD_EP4RDSTAT, -1 },
142 // FIFO 5 is EP5, OUT
143 {USBD_EP5RD, -1, USBD_EP5CS, USBD_EP5RDSTAT, -1 }
144 };
145
146 static struct {
147 unsigned int id;
148 const char *str;
149 } ep_dma_id[] = {
150 { DMA_ID_USBDEV_EP0_TX, "USBDev EP0 IN" },
151 { DMA_ID_USBDEV_EP0_RX, "USBDev EP0 OUT" },
152 { DMA_ID_USBDEV_EP2_TX, "USBDev EP2 IN" },
153 { DMA_ID_USBDEV_EP3_TX, "USBDev EP3 IN" },
154 { DMA_ID_USBDEV_EP4_RX, "USBDev EP4 OUT" },
155 { DMA_ID_USBDEV_EP5_RX, "USBDev EP5 OUT" }
156 };
157
158 #define DIR_OUT 0
159 #define DIR_IN (1<<3)
160
161 #define CONTROL_EP USB_ENDPOINT_XFER_CONTROL
162 #define BULK_EP USB_ENDPOINT_XFER_BULK
163
164 static inline endpoint_t *
165 epaddr_to_ep(struct usb_dev* dev, int ep_addr)
166 {
167 if (ep_addr >= 0 && ep_addr < 2)
168 return &dev->ep[0];
169 if (ep_addr < 6)
170 return &dev->ep[ep_addr];
171 return NULL;
172 }
173
174 static const char* std_req_name[] = {
175 "GET_STATUS",
176 "CLEAR_FEATURE",
177 "RESERVED",
178 "SET_FEATURE",
179 "RESERVED",
180 "SET_ADDRESS",
181 "GET_DESCRIPTOR",
182 "SET_DESCRIPTOR",
183 "GET_CONFIGURATION",
184 "SET_CONFIGURATION",
185 "GET_INTERFACE",
186 "SET_INTERFACE",
187 "SYNCH_FRAME"
188 };
189
190 static inline const char*
191 get_std_req_name(int req)
192 {
193 return (req >= 0 && req <= 12) ? std_req_name[req] : "UNKNOWN";
194 }
195
196 #if 0
197 static void
198 dump_setup(struct usb_ctrlrequest* s)
199 {
200 dbg("%s: requesttype=%d", __FUNCTION__, s->bRequestType);
201 dbg("%s: request=%d %s", __FUNCTION__, s->bRequest,
202 get_std_req_name(s->bRequest));
203 dbg("%s: value=0x%04x", __FUNCTION__, le16_to_cpu(s->wValue));
204 dbg("%s: index=%d", __FUNCTION__, le16_to_cpu(s->wIndex));
205 dbg("%s: length=%d", __FUNCTION__, le16_to_cpu(s->wLength));
206 }
207 #endif
208
209 static inline usbdev_pkt_t *
210 alloc_packet(endpoint_t * ep, int data_size, void* data)
211 {
212 usbdev_pkt_t* pkt = kmalloc(sizeof(usbdev_pkt_t) + data_size,
213 ALLOC_FLAGS);
214 if (!pkt)
215 return NULL;
216 pkt->ep_addr = ep->address;
217 pkt->size = data_size;
218 pkt->status = 0;
219 pkt->next = NULL;
220 if (data)
221 memcpy(pkt->payload, data, data_size);
222
223 return pkt;
224 }
225
226
227 /*
228 * Link a packet to the tail of the endpoint's packet list.
229 * EP spinlock must be held when calling.
230 */
231 static void
232 link_tail(endpoint_t * ep, pkt_list_t * list, usbdev_pkt_t * pkt)
233 {
234 if (!list->tail) {
235 list->head = list->tail = pkt;
236 list->count = 1;
237 } else {
238 list->tail->next = pkt;
239 list->tail = pkt;
240 list->count++;
241 }
242 }
243
244 /*
245 * Unlink and return a packet from the head of the given packet
246 * list. It is the responsibility of the caller to free the packet.
247 * EP spinlock must be held when calling.
248 */
249 static usbdev_pkt_t *
250 unlink_head(pkt_list_t * list)
251 {
252 usbdev_pkt_t *pkt;
253
254 pkt = list->head;
255 if (!pkt || !list->count) {
256 return NULL;
257 }
258
259 list->head = pkt->next;
260 if (!list->head) {
261 list->head = list->tail = NULL;
262 list->count = 0;
263 } else
264 list->count--;
265
266 return pkt;
267 }
268
269 /*
270 * Create and attach a new packet to the tail of the endpoint's
271 * packet list. EP spinlock must be held when calling.
272 */
273 static usbdev_pkt_t *
274 add_packet(endpoint_t * ep, pkt_list_t * list, int size)
275 {
276 usbdev_pkt_t *pkt = alloc_packet(ep, size, NULL);
277 if (!pkt)
278 return NULL;
279
280 link_tail(ep, list, pkt);
281 return pkt;
282 }
283
284
285 /*
286 * Unlink and free a packet from the head of the endpoint's
287 * packet list. EP spinlock must be held when calling.
288 */
289 static inline void
290 free_packet(pkt_list_t * list)
291 {
292 kfree(unlink_head(list));
293 }
294
295 /* EP spinlock must be held when calling. */
296 static inline void
297 flush_pkt_list(pkt_list_t * list)
298 {
299 while (list->count)
300 free_packet(list);
301 }
302
303 /* EP spinlock must be held when calling */
304 static inline void
305 flush_write_fifo(endpoint_t * ep)
306 {
307 if (ep->reg->write_fifo_status >= 0) {
308 au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
309 USBDEV_FSTAT_OF,
310 ep->reg->write_fifo_status);
311 //udelay(100);
312 //au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
313 // ep->reg->write_fifo_status);
314 }
315 }
316
317 /* EP spinlock must be held when calling */
318 static inline void
319 flush_read_fifo(endpoint_t * ep)
320 {
321 if (ep->reg->read_fifo_status >= 0) {
322 au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
323 USBDEV_FSTAT_OF,
324 ep->reg->read_fifo_status);
325 //udelay(100);
326 //au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
327 // ep->reg->read_fifo_status);
328 }
329 }
330
331
332 /* EP spinlock must be held when calling. */
333 static void
334 endpoint_flush(endpoint_t * ep)
335 {
336 // First, flush all packets
337 flush_pkt_list(&ep->inlist);
338 flush_pkt_list(&ep->outlist);
339
340 // Now flush the endpoint's h/w FIFO(s)
341 flush_write_fifo(ep);
342 flush_read_fifo(ep);
343 }
344
345 /* EP spinlock must be held when calling. */
346 static void
347 endpoint_stall(endpoint_t * ep)
348 {
349 u32 cs;
350
351 warn(__FUNCTION__);
352
353 cs = au_readl(ep->reg->ctrl_stat) | USBDEV_CS_STALL;
354 au_writel(cs, ep->reg->ctrl_stat);
355 }
356
357 /* EP spinlock must be held when calling. */
358 static void
359 endpoint_unstall(endpoint_t * ep)
360 {
361 u32 cs;
362
363 warn(__FUNCTION__);
364
365 cs = au_readl(ep->reg->ctrl_stat) & ~USBDEV_CS_STALL;
366 au_writel(cs, ep->reg->ctrl_stat);
367 }
368
369 static void
370 endpoint_reset_datatoggle(endpoint_t * ep)
371 {
372 // FIXME: is this possible?
373 }
374
375
376 /* EP spinlock must be held when calling. */
377 static int
378 endpoint_fifo_read(endpoint_t * ep)
379 {
380 int read_count = 0;
381 u8 *bufptr;
382 usbdev_pkt_t *pkt = ep->outlist.tail;
383
384 if (!pkt)
385 return -EINVAL;
386
387 bufptr = &pkt->payload[pkt->size];
388 while (au_readl(ep->reg->read_fifo_status) & USBDEV_FSTAT_FCNT_MASK) {
389 *bufptr++ = au_readl(ep->reg->read_fifo) & 0xff;
390 read_count++;
391 pkt->size++;
392 }
393
394 return read_count;
395 }
396
397 #if 0
398 /* EP spinlock must be held when calling. */
399 static int
400 endpoint_fifo_write(endpoint_t * ep, int index)
401 {
402 int write_count = 0;
403 u8 *bufptr;
404 usbdev_pkt_t *pkt = ep->inlist.head;
405
406 if (!pkt)
407 return -EINVAL;
408
409 bufptr = &pkt->payload[index];
410 while ((au_readl(ep->reg->write_fifo_status) &
411 USBDEV_FSTAT_FCNT_MASK) < EP_FIFO_DEPTH) {
412 if (bufptr < pkt->payload + pkt->size) {
413 au_writel(*bufptr++, ep->reg->write_fifo);
414 write_count++;
415 } else {
416 break;
417 }
418 }
419
420 return write_count;
421 }
422 #endif
423
424 /*
425 * This routine is called to restart transmission of a packet.
426 * The endpoint's TSIZE must be set to the new packet's size,
427 * and DMA to the write FIFO needs to be restarted.
428 * EP spinlock must be held when calling.
429 */
430 static void
431 kickstart_send_packet(endpoint_t * ep)
432 {
433 u32 cs;
434 usbdev_pkt_t *pkt = ep->inlist.head;
435
436 vdbg("%s: ep%d, pkt=%p", __FUNCTION__, ep->address, pkt);
437
438 if (!pkt) {
439 err("%s: head=NULL! list->count=%d", __FUNCTION__,
440 ep->inlist.count);
441 return;
442 }
443
444 dma_cache_wback_inv((unsigned long)pkt->payload, pkt->size);
445
446 /*
447 * make sure FIFO is empty
448 */
449 flush_write_fifo(ep);
450
451 cs = au_readl(ep->reg->ctrl_stat) & USBDEV_CS_STALL;
452 cs |= (pkt->size << USBDEV_CS_TSIZE_BIT);
453 au_writel(cs, ep->reg->ctrl_stat);
454
455 if (get_dma_active_buffer(ep->indma) == 1) {
456 set_dma_count1(ep->indma, pkt->size);
457 set_dma_addr1(ep->indma, virt_to_phys(pkt->payload));
458 enable_dma_buffer1(ep->indma); // reenable
459 } else {
460 set_dma_count0(ep->indma, pkt->size);
461 set_dma_addr0(ep->indma, virt_to_phys(pkt->payload));
462 enable_dma_buffer0(ep->indma); // reenable
463 }
464 if (dma_halted(ep->indma))
465 start_dma(ep->indma);
466 }
467
468
469 /*
470 * This routine is called when a packet in the inlist has been
471 * completed. Frees the completed packet and starts sending the
472 * next. EP spinlock must be held when calling.
473 */
474 static usbdev_pkt_t *
475 send_packet_complete(endpoint_t * ep)
476 {
477 usbdev_pkt_t *pkt = unlink_head(&ep->inlist);
478
479 if (pkt) {
480 pkt->status =
481 (au_readl(ep->reg->ctrl_stat) & USBDEV_CS_NAK) ?
482 PKT_STATUS_NAK : PKT_STATUS_ACK;
483
484 vdbg("%s: ep%d, %s pkt=%p, list count=%d", __FUNCTION__,
485 ep->address, (pkt->status & PKT_STATUS_NAK) ?
486 "NAK" : "ACK", pkt, ep->inlist.count);
487 }
488
489 /*
490 * The write fifo should already be drained if things are
491 * working right, but flush it anyway just in case.
492 */
493 flush_write_fifo(ep);
494
495 // begin transmitting next packet in the inlist
496 if (ep->inlist.count) {
497 kickstart_send_packet(ep);
498 }
499
500 return pkt;
501 }
502
503 /*
504 * Add a new packet to the tail of the given ep's packet
505 * inlist. The transmit complete interrupt frees packets from
506 * the head of this list. EP spinlock must be held when calling.
507 */
508 static int
509 send_packet(struct usb_dev* dev, usbdev_pkt_t *pkt, int async)
510 {
511 pkt_list_t *list;
512 endpoint_t* ep;
513
514 if (!pkt || !(ep = epaddr_to_ep(dev, pkt->ep_addr)))
515 return -EINVAL;
516
517 if (!pkt->size)
518 return 0;
519
520 list = &ep->inlist;
521
522 if (!async && list->count) {
523 halt_dma(ep->indma);
524 flush_pkt_list(list);
525 }
526
527 link_tail(ep, list, pkt);
528
529 vdbg("%s: ep%d, pkt=%p, size=%d, list count=%d", __FUNCTION__,
530 ep->address, pkt, pkt->size, list->count);
531
532 if (list->count == 1) {
533 /*
534 * if the packet count is one, it means the list was empty,
535 * and no more data will go out this ep until we kick-start
536 * it again.
537 */
538 kickstart_send_packet(ep);
539 }
540
541 return pkt->size;
542 }
543
544 /*
545 * This routine is called to restart reception of a packet.
546 * EP spinlock must be held when calling.
547 */
548 static void
549 kickstart_receive_packet(endpoint_t * ep)
550 {
551 usbdev_pkt_t *pkt;
552
553 // get and link a new packet for next reception
554 if (!(pkt = add_packet(ep, &ep->outlist, ep->max_pkt_size))) {
555 err("%s: could not alloc new packet", __FUNCTION__);
556 return;
557 }
558
559 if (get_dma_active_buffer(ep->outdma) == 1) {
560 clear_dma_done1(ep->outdma);
561 set_dma_count1(ep->outdma, ep->max_pkt_size);
562 set_dma_count0(ep->outdma, 0);
563 set_dma_addr1(ep->outdma, virt_to_phys(pkt->payload));
564 enable_dma_buffer1(ep->outdma); // reenable
565 } else {
566 clear_dma_done0(ep->outdma);
567 set_dma_count0(ep->outdma, ep->max_pkt_size);
568 set_dma_count1(ep->outdma, 0);
569 set_dma_addr0(ep->outdma, virt_to_phys(pkt->payload));
570 enable_dma_buffer0(ep->outdma); // reenable
571 }
572 if (dma_halted(ep->outdma))
573 start_dma(ep->outdma);
574 }
575
576
577 /*
578 * This routine is called when a packet in the outlist has been
579 * completed (received) and we need to prepare for a new packet
580 * to be received. Halts DMA and computes the packet size from the
581 * remaining DMA counter. Then prepares a new packet for reception
582 * and restarts DMA. FIXME: what if another packet comes in
583 * on top of the completed packet? Counter would be wrong.
584 * EP spinlock must be held when calling.
585 */
586 static usbdev_pkt_t *
587 receive_packet_complete(endpoint_t * ep)
588 {
589 usbdev_pkt_t *pkt = ep->outlist.tail;
590 u32 cs;
591
592 halt_dma(ep->outdma);
593
594 cs = au_readl(ep->reg->ctrl_stat);
595
596 if (!pkt)
597 return NULL;
598
599 pkt->size = ep->max_pkt_size - get_dma_residue(ep->outdma);
600 if (pkt->size)
601 dma_cache_inv((unsigned long)pkt->payload, pkt->size);
602 /*
603 * need to pull out any remaining bytes in the FIFO.
604 */
605 endpoint_fifo_read(ep);
606 /*
607 * should be drained now, but flush anyway just in case.
608 */
609 flush_read_fifo(ep);
610
611 pkt->status = (cs & USBDEV_CS_NAK) ? PKT_STATUS_NAK : PKT_STATUS_ACK;
612 if (ep->address == 0 && (cs & USBDEV_CS_SU))
613 pkt->status |= PKT_STATUS_SU;
614
615 vdbg("%s: ep%d, %s pkt=%p, size=%d", __FUNCTION__,
616 ep->address, (pkt->status & PKT_STATUS_NAK) ?
617 "NAK" : "ACK", pkt, pkt->size);
618
619 kickstart_receive_packet(ep);
620
621 return pkt;
622 }
623
624
625 /*
626 ****************************************************************************
627 * Here begin the standard device request handlers. They are
628 * all called by do_setup() via a table of function pointers.
629 ****************************************************************************
630 */
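/*
 * Each handler below services one standard (USB chapter 9) bRequest
 * code (see std_req_name[] above for the mapping) and returns the EP0
 * stage the driver should move to once the request has been acted on.
 * do_setup() indexes req_method[] with bRequest to pick a handler.
 */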
631
632 static ep0_stage_t
633 do_get_status(struct usb_dev* dev, struct usb_ctrlrequest* setup)
634 {
635 switch (setup->bRequestType) {
636 case 0x80: // Device
637 // FIXME: send device status
638 break;
639 case 0x81: // Interface
640 // FIXME: send interface status
641 break;
642 case 0x82: // End Point
643 // FIXME: send endpoint status
644 break;
645 default:
646 // Invalid Command
647 endpoint_stall(&dev->ep[0]); // Stall End Point 0
648 break;
649 }
650
651 return STATUS_STAGE;
652 }
653
654 static ep0_stage_t
655 do_clear_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
656 {
657 switch (setup->bRequestType) {
658 case 0x00: // Device
659 if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
660 dev->remote_wakeup_en = 0;
661 else
662 endpoint_stall(&dev->ep[0]);
663 break;
664 case 0x02: // End Point
665 if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
666 endpoint_t *ep =
667 epaddr_to_ep(dev,
668 le16_to_cpu(setup->wIndex) & 0xff);
669
670 endpoint_unstall(ep);
671 endpoint_reset_datatoggle(ep);
672 } else
673 endpoint_stall(&dev->ep[0]);
674 break;
675 }
676
677 return SETUP_STAGE;
678 }
679
680 static ep0_stage_t
681 do_reserved(struct usb_dev* dev, struct usb_ctrlrequest* setup)
682 {
683 // Invalid request, stall End Point 0
684 endpoint_stall(&dev->ep[0]);
685 return SETUP_STAGE;
686 }
687
688 static ep0_stage_t
689 do_set_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
690 {
691 switch (setup->bRequestType) {
692 case 0x00: // Device
693 if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
694 dev->remote_wakeup_en = 1;
695 else
696 endpoint_stall(&dev->ep[0]);
697 break;
698 case 0x02: // End Point
699 if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
700 endpoint_t *ep =
701 epaddr_to_ep(dev,
702 le16_to_cpu(setup->wIndex) & 0xff);
703
704 endpoint_stall(ep);
705 } else
706 endpoint_stall(&dev->ep[0]);
707 break;
708 }
709
710 return SETUP_STAGE;
711 }
712
713 static ep0_stage_t
714 do_set_address(struct usb_dev* dev, struct usb_ctrlrequest* setup)
715 {
716 int new_state = dev->state;
717 int new_addr = le16_to_cpu(setup->wValue);
718
719 dbg("%s: our address=%d", __FUNCTION__, new_addr);
720
721 if (new_addr > 127) {
722 // usb spec doesn't tell us what to do, so just go to
723 // default state
724 new_state = DEFAULT;
725 dev->address = 0;
726 } else if (dev->address != new_addr) {
727 dev->address = new_addr;
728 new_state = ADDRESS;
729 }
730
731 if (dev->state != new_state) {
732 dev->state = new_state;
733 /* inform function layer of usbdev state change */
734 dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
735 }
736
737 return SETUP_STAGE;
738 }
739
740 static ep0_stage_t
741 do_get_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
742 {
743 int strnum, desc_len = le16_to_cpu(setup->wLength);
744
745 switch (le16_to_cpu(setup->wValue) >> 8) {
746 case USB_DT_DEVICE:
747 // send device descriptor!
748 desc_len = desc_len > dev->dev_desc->bLength ?
749 dev->dev_desc->bLength : desc_len;
750 dbg("sending device desc, size=%d", desc_len);
751 send_packet(dev, alloc_packet(&dev->ep[0], desc_len,
752 dev->dev_desc), 0);
753 break;
754 case USB_DT_CONFIG:
755 // If the config descr index in low-byte of
756 // setup->wValue is valid, send config descr,
757 // otherwise stall ep0.
758 if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
759 // send config descriptor!
760 if (desc_len <= USB_DT_CONFIG_SIZE) {
761 dbg("sending partial config desc, size=%d",
762 desc_len);
763 send_packet(dev,
764 alloc_packet(&dev->ep[0],
765 desc_len,
766 dev->conf_desc),
767 0);
768 } else {
769 int len = le16_to_cpu(dev->conf_desc->wTotalLength);
770 dbg("sending whole config desc,"
771 " size=%d, our size=%d", desc_len, len);
772 desc_len = desc_len > len ? len : desc_len;
773 send_packet(dev,
774 alloc_packet(&dev->ep[0],
775 desc_len,
776 dev->full_conf_desc),
777 0);
778 }
779 } else
780 endpoint_stall(&dev->ep[0]);
781 break;
782 case USB_DT_STRING:
783 // If the string descr index in low-byte of setup->wValue
784 // is valid, send string descr, otherwise stall ep0.
785 strnum = le16_to_cpu(setup->wValue) & 0xff;
786 if (strnum >= 0 && strnum < 6) {
787 struct usb_string_descriptor *desc =
788 dev->str_desc[strnum];
789 desc_len = desc_len > desc->bLength ?
790 desc->bLength : desc_len;
791 dbg("sending string desc %d", strnum);
792 send_packet(dev,
793 alloc_packet(&dev->ep[0], desc_len,
794 desc), 0);
795 } else
796 endpoint_stall(&dev->ep[0]);
797 break;
798 default:
799 // Invalid request
800 err("invalid get desc=%d, stalled",
801 le16_to_cpu(setup->wValue) >> 8);
802 endpoint_stall(&dev->ep[0]); // Stall endpoint 0
803 break;
804 }
805
806 return STATUS_STAGE;
807 }
808
809 static ep0_stage_t
810 do_set_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
811 {
812 // TODO: implement
813 // there will be an OUT data stage (the descriptor to set)
814 return DATA_STAGE;
815 }
816
817 static ep0_stage_t
818 do_get_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
819 {
820 // send dev->configuration
821 dbg("sending config");
822 send_packet(dev, alloc_packet(&dev->ep[0], 1, &dev->configuration),
823 0);
824 return STATUS_STAGE;
825 }
826
827 static ep0_stage_t
828 do_set_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
829 {
830 // set active config to low-byte of setup->wValue
831 dev->configuration = le16_to_cpu(setup->wValue) & 0xff;
832 dbg("set config, config=%d", dev->configuration);
833 if (!dev->configuration && dev->state > DEFAULT) {
834 dev->state = ADDRESS;
835 /* inform function layer of usbdev state change */
836 dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
837 } else if (dev->configuration == 1) {
838 dev->state = CONFIGURED;
839 /* inform function layer of usbdev state change */
840 dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
841 } else {
842 // FIXME: "respond with request error" - how?
843 }
844
845 return SETUP_STAGE;
846 }
847
848 static ep0_stage_t
849 do_get_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
850 {
851 // interface must be zero.
852 if ((le16_to_cpu(setup->wIndex) & 0xff) || dev->state == ADDRESS) {
853 // FIXME: respond with "request error". how?
854 } else if (dev->state == CONFIGURED) {
855 // send dev->alternate_setting
856 dbg("sending alt setting");
857 send_packet(dev, alloc_packet(&dev->ep[0], 1,
858 &dev->alternate_setting), 0);
859 }
860
861 return STATUS_STAGE;
862
863 }
864
865 static ep0_stage_t
866 do_set_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
867 {
868 if (dev->state == ADDRESS) {
869 // FIXME: respond with "request error". how?
870 } else if (dev->state == CONFIGURED) {
871 dev->interface = le16_to_cpu(setup->wIndex) & 0xff;
872 dev->alternate_setting =
873 le16_to_cpu(setup->wValue) & 0xff;
874 // interface and alternate_setting must be zero
875 if (dev->interface || dev->alternate_setting) {
876 // FIXME: respond with "request error". how?
877 }
878 }
879
880 return SETUP_STAGE;
881 }
882
883 static ep0_stage_t
884 do_synch_frame(struct usb_dev* dev, struct usb_ctrlrequest* setup)
885 {
886 // TODO
887 return SETUP_STAGE;
888 }
889
890 typedef ep0_stage_t (*req_method_t)(struct usb_dev* dev,
891 struct usb_ctrlrequest* setup);
892
893
894 /* Table of the standard device request handlers */
895 static const req_method_t req_method[] = {
896 do_get_status,
897 do_clear_feature,
898 do_reserved,
899 do_set_feature,
900 do_reserved,
901 do_set_address,
902 do_get_descriptor,
903 do_set_descriptor,
904 do_get_configuration,
905 do_set_configuration,
906 do_get_interface,
907 do_set_interface,
908 do_synch_frame
909 };
910
911
912 // SETUP packet request dispatcher
913 static void
914 do_setup (struct usb_dev* dev, struct usb_ctrlrequest* setup)
915 {
916 req_method_t m;
917
918 dbg("%s: req %d %s", __FUNCTION__, setup->bRequest,
919 get_std_req_name(setup->bRequest));
920
921 if ((setup->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD ||
922 (setup->bRequestType & USB_RECIP_MASK) != USB_RECIP_DEVICE) {
923 err("%s: invalid requesttype 0x%02x", __FUNCTION__,
924 setup->bRequestType);
925 return;
926 }
927
928 if ((setup->bRequestType & 0x80) == USB_DIR_OUT && setup->wLength)
929 dbg("%s: OUT phase! length=%d", __FUNCTION__, setup->wLength);
930
931 if (setup->bRequest < sizeof(req_method)/sizeof(req_method_t))
932 m = req_method[setup->bRequest];
933 else
934 m = do_reserved;
935
936 dev->ep0_stage = (*m)(dev, setup);
937 }
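/*
 * Worked example (illustrative only): for a GET_DESCRIPTOR(DEVICE)
 * request, do_setup() dispatches to do_get_descriptor(), which queues
 * the device descriptor on EP0 via send_packet() and returns
 * STATUS_STAGE. The host reads the descriptor during the IN data stage
 * and then sends a zero-length OUT packet, which process_ep0_receive()
 * consumes in its STATUS_STAGE case before resetting to SETUP_STAGE.
 */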
938
939 /*
940 * A SETUP, DATA0, or DATA1 packet has been received
941 * on the default control endpoint's fifo.
942 */
943 static void
944 process_ep0_receive (struct usb_dev* dev)
945 {
946 endpoint_t *ep0 = &dev->ep[0];
947 usbdev_pkt_t *pkt;
948
949 spin_lock(&ep0->lock);
950
951 // complete packet and prepare a new packet
952 pkt = receive_packet_complete(ep0);
953 if (!pkt) {
954 // FIXME: should put a warn/err here.
955 spin_unlock(&ep0->lock);
956 return;
957 }
958
959 // unlink immediately from endpoint.
960 unlink_head(&ep0->outlist);
961
962 // override current stage if h/w says it's a setup packet
963 if (pkt->status & PKT_STATUS_SU)
964 dev->ep0_stage = SETUP_STAGE;
965
966 switch (dev->ep0_stage) {
967 case SETUP_STAGE:
968 vdbg("SU bit is %s in setup stage",
969 (pkt->status & PKT_STATUS_SU) ? "set" : "not set");
970
971 if (pkt->size == sizeof(struct usb_ctrlrequest)) {
972 #ifdef VDEBUG
973 if (pkt->status & PKT_STATUS_ACK)
974 vdbg("received SETUP");
975 else
976 vdbg("received NAK SETUP");
977 #endif
978 do_setup(dev, (struct usb_ctrlrequest*)pkt->payload);
979 } else
980 err("%s: wrong size SETUP received", __FUNCTION__);
981 break;
982 case DATA_STAGE:
983 /*
984 * this setup has an OUT data stage. Of the standard
985 * device requests, only set_descriptor has this stage,
986 * so this packet is that descriptor. TODO: drop it for
987 * now, set_descriptor not implemented.
988 *
989 * Need to place a byte in the write FIFO here, to prepare
990 * to send a zero-length DATA ack packet to the host in the
991 * STATUS stage.
992 */
993 au_writel(0, ep0->reg->write_fifo);
994 dbg("received OUT stage DATAx on EP0, size=%d", pkt->size);
995 dev->ep0_stage = SETUP_STAGE;
996 break;
997 case STATUS_STAGE:
998 // this setup had an IN data stage, and host is ACK'ing
999 // the packet we sent during that stage.
1000 if (pkt->size != 0)
1001 warn("received non-zero ACK on EP0??");
1002 #ifdef VDEBUG
1003 else
1004 vdbg("received ACK on EP0");
1005 #endif
1006 dev->ep0_stage = SETUP_STAGE;
1007 break;
1008 }
1009
1010 spin_unlock(&ep0->lock);
1011 // we're done processing the packet, free it
1012 kfree(pkt);
1013 }
1014
1015
1016 /*
1017 * A DATA0/1 packet has been received on one of the OUT endpoints (4 or 5)
1018 */
1019 static void
1020 process_ep_receive (struct usb_dev* dev, endpoint_t *ep)
1021 {
1022 usbdev_pkt_t *pkt;
1023
1024 spin_lock(&ep->lock);
1025 pkt = receive_packet_complete(ep);
1026 spin_unlock(&ep->lock);
1027
1028 dev->func_cb(CB_PKT_COMPLETE, (unsigned long)pkt, dev->cb_data);
1029 }
1030
1031
1032
1033 /* This ISR handles the receive complete and suspend events */
1034 static void
1035 req_sus_intr (int irq, void *dev_id, struct pt_regs *regs)
1036 {
1037 struct usb_dev *dev = (struct usb_dev *) dev_id;
1038 u32 status;
1039
1040 status = au_readl(USBD_INTSTAT);
1041 au_writel(status, USBD_INTSTAT); // ack'em
1042
1043 if (status & (1<<0))
1044 process_ep0_receive(dev);
1045 if (status & (1<<4))
1046 process_ep_receive(dev, &dev->ep[4]);
1047 if (status & (1<<5))
1048 process_ep_receive(dev, &dev->ep[5]);
1049 }
1050
1051
1052 /* This ISR handles the DMA done events on EP0 */
1053 static void
1054 dma_done_ep0_intr(int irq, void *dev_id, struct pt_regs *regs)
1055 {
1056 struct usb_dev *dev = (struct usb_dev *) dev_id;
1057 usbdev_pkt_t* pkt;
1058 endpoint_t *ep0 = &dev->ep[0];
1059 u32 cs0, buff_done;
1060
1061 spin_lock(&ep0->lock);
1062 cs0 = au_readl(ep0->reg->ctrl_stat);
1063
1064 // first check packet transmit done
1065 if ((buff_done = get_dma_buffer_done(ep0->indma)) != 0) {
1066 // transmitted a DATAx packet during DATA stage
1067 // on control endpoint 0
1068 // clear DMA done bit
1069 if (buff_done & DMA_D0)
1070 clear_dma_done0(ep0->indma);
1071 if (buff_done & DMA_D1)
1072 clear_dma_done1(ep0->indma);
1073
1074 pkt = send_packet_complete(ep0);
1075 if (pkt)
1076 kfree(pkt);
1077 }
1078
1079 /*
1080 * Now check packet receive done. Shouldn't get these,
1081 * the receive packet complete intr should happen
1082 * before the DMA done intr occurs.
1083 */
1084 if ((buff_done = get_dma_buffer_done(ep0->outdma)) != 0) {
1085 // clear DMA done bit
1086 if (buff_done & DMA_D0)
1087 clear_dma_done0(ep0->outdma);
1088 if (buff_done & DMA_D1)
1089 clear_dma_done1(ep0->outdma);
1090
1091 //process_ep0_receive(dev);
1092 }
1093
1094 spin_unlock(&ep0->lock);
1095 }
1096
1097 /* This ISR handles the DMA done events on endpoints 2,3,4,5 */
1098 static void
1099 dma_done_ep_intr(int irq, void *dev_id, struct pt_regs *regs)
1100 {
1101 struct usb_dev *dev = (struct usb_dev *) dev_id;
1102 int i;
1103
1104 for (i = 2; i < 6; i++) {
1105 u32 buff_done;
1106 usbdev_pkt_t* pkt;
1107 endpoint_t *ep = &dev->ep[i];
1108
1109 if (!ep->active) continue;
1110
1111 spin_lock(&ep->lock);
1112
1113 if (ep->direction == USB_DIR_IN) {
1114 buff_done = get_dma_buffer_done(ep->indma);
1115 if (buff_done != 0) {
1116 // transmitted a DATAx pkt on the IN ep
1117 // clear DMA done bit
1118 if (buff_done & DMA_D0)
1119 clear_dma_done0(ep->indma);
1120 if (buff_done & DMA_D1)
1121 clear_dma_done1(ep->indma);
1122
1123 pkt = send_packet_complete(ep);
1124
1125 spin_unlock(&ep->lock);
1126 dev->func_cb(CB_PKT_COMPLETE,
1127 (unsigned long)pkt,
1128 dev->cb_data);
1129 spin_lock(&ep->lock);
1130 }
1131 } else {
1132 /*
1133 * Check packet receive done (OUT ep). Shouldn't get
1134 * these, the rx packet complete intr should happen
1135 * before the DMA done intr occurs.
1136 */
1137 buff_done = get_dma_buffer_done(ep->outdma);
1138 if (buff_done != 0) {
1139 // received a DATAx pkt on the OUT ep
1140 // clear DMA done bit
1141 if (buff_done & DMA_D0)
1142 clear_dma_done0(ep->outdma);
1143 if (buff_done & DMA_D1)
1144 clear_dma_done1(ep->outdma);
1145
1146 //process_ep_receive(dev, ep);
1147 }
1148 }
1149
1150 spin_unlock(&ep->lock);
1151 }
1152 }
1153
1154
1155 /***************************************************************************
1156 * Here begin the external interface functions
1157 ***************************************************************************
1158 */
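/*
 * Usage sketch (illustrative only, not part of this driver): one way a
 * function layer might drive the interface below. The names my_func_cb
 * and my_send are hypothetical, and the sketch assumes the function
 * layer frees completed packets handed to its callback. Endpoint
 * addresses are assigned by usbdev_init(), which writes them back into
 * the endpoint descriptors it is given.
 */
#if 0
static void my_func_cb(usbdev_cb_type_t type, unsigned long arg, void *data)
{
	if (type == CB_NEW_STATE) {
		/* arg carries the new usbdev_state_t */
	} else if (type == CB_PKT_COMPLETE) {
		/* arg is a usbdev_pkt_t*: a transmitted IN packet, or
		   notice that OUT data can be fetched with
		   usbdev_receive_packet(). */
	}
}

static int my_send(int in_ep_addr, const void *buf, int len)
{
	usbdev_pkt_t *pkt;
	int ret = usbdev_alloc_packet(in_ep_addr, len, &pkt);

	if (ret)
		return ret;
	memcpy(pkt->payload, buf, len);
	return usbdev_send_packet(in_ep_addr, pkt);
}

/*
 * Bring-up would then look like:
 *	usbdev_init(&dev_desc, &conf_desc, &if_desc, ep_descs,
 *		    str_descs, my_func_cb, NULL);
 */
#endif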
1159
1160 /*
1161 * allocate a new packet
1162 */
1163 int
1164 usbdev_alloc_packet(int ep_addr, int data_size, usbdev_pkt_t** pkt)
1165 {
1166 endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
1167 usbdev_pkt_t* lpkt = NULL;
1168
1169 if (!ep || !ep->active || ep->address < 2)
1170 return -ENODEV;
1171 if (data_size > ep->max_pkt_size)
1172 return -EINVAL;
1173
1174 lpkt = *pkt = alloc_packet(ep, data_size, NULL);
1175 if (!lpkt)
1176 return -ENOMEM;
1177 return 0;
1178 }
1179
1180
1181 /*
1182 * packet send
1183 */
1184 int
1185 usbdev_send_packet(int ep_addr, usbdev_pkt_t * pkt)
1186 {
1187 unsigned long flags;
1188 int count;
1189 endpoint_t * ep;
1190
1191 if (!pkt || !(ep = epaddr_to_ep(&usbdev, pkt->ep_addr)) ||
1192 !ep->active || ep->address < 2)
1193 return -ENODEV;
1194 if (ep->direction != USB_DIR_IN)
1195 return -EINVAL;
1196
1197 spin_lock_irqsave(&ep->lock, flags);
1198 count = send_packet(&usbdev, pkt, 1);
1199 spin_unlock_irqrestore(&ep->lock, flags);
1200
1201 return count;
1202 }
1203
1204 /*
1205 * packet receive
1206 */
1207 int
1208 usbdev_receive_packet(int ep_addr, usbdev_pkt_t** pkt)
1209 {
1210 unsigned long flags;
1211 usbdev_pkt_t* lpkt = NULL;
1212 endpoint_t *ep = epaddr_to_ep(&usbdev, ep_addr);
1213
1214 if (!ep || !ep->active || ep->address < 2)
1215 return -ENODEV;
1216 if (ep->direction != USB_DIR_OUT)
1217 return -EINVAL;
1218
1219 spin_lock_irqsave(&ep->lock, flags);
1220 if (ep->outlist.count > 1)
1221 lpkt = unlink_head(&ep->outlist);
1222 spin_unlock_irqrestore(&ep->lock, flags);
1223
1224 if (!lpkt) {
1225 /* no packet available */
1226 *pkt = NULL;
1227 return -ENODATA;
1228 }
1229
1230 *pkt = lpkt;
1231
1232 return lpkt->size;
1233 }
1234
1235
1236 /*
1237 * return total queued byte count on the endpoint.
1238 */
1239 int
1240 usbdev_get_byte_count(int ep_addr)
1241 {
1242 unsigned long flags;
1243 pkt_list_t *list;
1244 usbdev_pkt_t *scan;
1245 int count = 0;
1246 endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
1247
1248 if (!ep || !ep->active || ep->address < 2)
1249 return -ENODEV;
1250
1251 if (ep->direction == USB_DIR_IN) {
1252 list = &ep->inlist;
1253
1254 spin_lock_irqsave(&ep->lock, flags);
1255 for (scan = list->head; scan; scan = scan->next)
1256 count += scan->size;
1257 spin_unlock_irqrestore(&ep->lock, flags);
1258 } else {
1259 list = &ep->outlist;
1260
1261 spin_lock_irqsave(&ep->lock, flags);
1262 if (list->count > 1) {
1263 for (scan = list->head; scan != list->tail;
1264 scan = scan->next)
1265 count += scan->size;
1266 }
1267 spin_unlock_irqrestore(&ep->lock, flags);
1268 }
1269
1270 return count;
1271 }
1272
1273
1274 void
1275 usbdev_exit(void)
1276 {
1277 endpoint_t *ep;
1278 int i;
1279
1280 au_writel(0, USBD_INTEN); // disable usb dev ints
1281 au_writel(0, USBD_ENABLE); // disable usb dev
1282
1283 free_irq(AU1000_USB_DEV_REQ_INT, &usbdev);
1284 free_irq(AU1000_USB_DEV_SUS_INT, &usbdev);
1285
1286 // free all control endpoint resources
1287 ep = &usbdev.ep[0];
1288 free_au1000_dma(ep->indma);
1289 free_au1000_dma(ep->outdma);
1290 endpoint_flush(ep);
1291
1292 // free ep resources
1293 for (i = 2; i < 6; i++) {
1294 ep = &usbdev.ep[i];
1295 if (!ep->active) continue;
1296
1297 if (ep->direction == USB_DIR_IN) {
1298 free_au1000_dma(ep->indma);
1299 } else {
1300 free_au1000_dma(ep->outdma);
1301 }
1302 endpoint_flush(ep);
1303 }
1304
1305 if (usbdev.full_conf_desc)
1306 kfree(usbdev.full_conf_desc);
1307 }
1308
1309 int
1310 usbdev_init(struct usb_device_descriptor* dev_desc,
1311 struct usb_config_descriptor* config_desc,
1312 struct usb_interface_descriptor* if_desc,
1313 struct usb_endpoint_descriptor* ep_desc,
1314 struct usb_string_descriptor* str_desc[],
1315 void (*cb)(usbdev_cb_type_t, unsigned long, void *),
1316 void* cb_data)
1317 {
1318 endpoint_t *ep0;
1319 int i, ret=0;
1320 u8* fcd;
1321
1322 if (dev_desc->bNumConfigurations > 1 ||
1323 config_desc->bNumInterfaces > 1 ||
1324 if_desc->bNumEndpoints > 4) {
1325 err("Only one config, one i/f, and no more "
1326 "than 4 ep's allowed");
1327 ret = -EINVAL;
1328 goto out;
1329 }
1330
1331 if (!cb) {
1332 err("Function-layer callback required");
1333 ret = -EINVAL;
1334 goto out;
1335 }
1336
1337 if (dev_desc->bMaxPacketSize0 != USBDEV_EP0_MAX_PACKET_SIZE) {
1338 warn("EP0 Max Packet size must be %d",
1339 USBDEV_EP0_MAX_PACKET_SIZE);
1340 dev_desc->bMaxPacketSize0 = USBDEV_EP0_MAX_PACKET_SIZE;
1341 }
1342
1343 memset(&usbdev, 0, sizeof(struct usb_dev));
1344
1345 usbdev.state = DEFAULT;
1346 usbdev.dev_desc = dev_desc;
1347 usbdev.if_desc = if_desc;
1348 usbdev.conf_desc = config_desc;
1349 for (i=0; i<6; i++)
1350 usbdev.str_desc[i] = str_desc[i];
1351 usbdev.func_cb = cb;
1352 usbdev.cb_data = cb_data;
1353
1354 /* Initialize default control endpoint */
1355 ep0 = &usbdev.ep[0];
1356 ep0->active = 1;
1357 ep0->type = CONTROL_EP;
1358 ep0->max_pkt_size = USBDEV_EP0_MAX_PACKET_SIZE;
1359 spin_lock_init(&ep0->lock);
1360 ep0->desc = NULL; // ep0 has no descriptor
1361 ep0->address = 0;
1362 ep0->direction = 0;
1363 ep0->reg = &ep_reg[0];
1364
1365 /* Initialize the other requested endpoints */
1366 for (i = 0; i < if_desc->bNumEndpoints; i++) {
1367 struct usb_endpoint_descriptor* epd = &ep_desc[i];
1368 endpoint_t *ep;
1369
1370 if ((epd->bEndpointAddress & 0x80) == USB_DIR_IN) {
1371 ep = &usbdev.ep[2];
1372 ep->address = 2;
1373 if (ep->active) {
1374 ep = &usbdev.ep[3];
1375 ep->address = 3;
1376 if (ep->active) {
1377 err("too many IN ep's requested");
1378 ret = -ENODEV;
1379 goto out;
1380 }
1381 }
1382 } else {
1383 ep = &usbdev.ep[4];
1384 ep->address = 4;
1385 if (ep->active) {
1386 ep = &usbdev.ep[5];
1387 ep->address = 5;
1388 if (ep->active) {
1389 err("too many OUT ep's requested");
1390 ret = -ENODEV;
1391 goto out;
1392 }
1393 }
1394 }
1395
1396 ep->active = 1;
1397 epd->bEndpointAddress &= ~0x0f;
1398 epd->bEndpointAddress |= (u8)ep->address;
1399 ep->direction = epd->bEndpointAddress & 0x80;
1400 ep->type = epd->bmAttributes & 0x03;
1401 ep->max_pkt_size = le16_to_cpu(epd->wMaxPacketSize);
1402 spin_lock_init(&ep->lock);
1403 ep->desc = epd;
1404 ep->reg = &ep_reg[ep->address];
1405 }
1406
1407 /*
1408 * initialize the full config descriptor
1409 */
1410 usbdev.full_conf_desc = fcd = kmalloc(le16_to_cpu(config_desc->wTotalLength),
1411 ALLOC_FLAGS);
1412 if (!fcd) {
1413 err("failed to alloc full config descriptor");
1414 ret = -ENOMEM;
1415 goto out;
1416 }
1417
1418 memcpy(fcd, config_desc, USB_DT_CONFIG_SIZE);
1419 fcd += USB_DT_CONFIG_SIZE;
1420 memcpy(fcd, if_desc, USB_DT_INTERFACE_SIZE);
1421 fcd += USB_DT_INTERFACE_SIZE;
1422 for (i = 0; i < if_desc->bNumEndpoints; i++) {
1423 memcpy(fcd, &ep_desc[i], USB_DT_ENDPOINT_SIZE);
1424 fcd += USB_DT_ENDPOINT_SIZE;
1425 }
1426
1427 /* Now we're ready to enable the controller */
1428 au_writel(0x0002, USBD_ENABLE);
1429 udelay(100);
1430 au_writel(0x0003, USBD_ENABLE);
1431 udelay(100);
1432
1433 /* build and send config table based on ep descriptors */
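/*
 * Each FIFO gets a five-write entry in the controller's config table:
 * the endpoint number in the upper nibble (ORed with 0x04), a byte
 * combining direction, transfer type and the high bits of the max
 * packet size, the low seven bits of the max packet size, a zero
 * filler byte, and finally the endpoint address mapped onto that FIFO.
 * Unused FIFOs get a default 16-byte bulk configuration.
 */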
1434 for (i = 0; i < 6; i++) {
1435 endpoint_t *ep;
1436 if (i == 1)
1437 continue; // skip dummy ep
1438 ep = &usbdev.ep[i];
1439 if (ep->active) {
1440 au_writel((ep->address << 4) | 0x04, USBD_CONFIG);
1441 au_writel(((ep->max_pkt_size & 0x380) >> 7) |
1442 (ep->direction >> 4) | (ep->type << 4),
1443 USBD_CONFIG);
1444 au_writel((ep->max_pkt_size & 0x7f) << 1, USBD_CONFIG);
1445 au_writel(0x00, USBD_CONFIG);
1446 au_writel(ep->address, USBD_CONFIG);
1447 } else {
1448 u8 dir = (i==2 || i==3) ? DIR_IN : DIR_OUT;
1449 au_writel((i << 4) | 0x04, USBD_CONFIG);
1450 au_writel(((16 & 0x380) >> 7) | dir |
1451 (BULK_EP << 4), USBD_CONFIG);
1452 au_writel((16 & 0x7f) << 1, USBD_CONFIG);
1453 au_writel(0x00, USBD_CONFIG);
1454 au_writel(i, USBD_CONFIG);
1455 }
1456 }
1457
1458 /*
1459 * Enable Receive FIFO Complete interrupts only. Transmit
1460 * complete is being handled by the DMA done interrupts.
1461 */
1462 au_writel(0x31, USBD_INTEN);
1463
1464 /*
1465 * Controller is now enabled, request DMA and IRQ
1466 * resources.
1467 */
1468
1469 /* request the USB device transfer complete interrupt */
1470 if (request_irq(AU1000_USB_DEV_REQ_INT, req_sus_intr, SA_INTERRUPT,
1471 "USBdev req", &usbdev)) {
1472 err("Can't get device request intr");
1473 ret = -ENXIO;
1474 goto out;
1475 }
1476 /* request the USB device suspend interrupt */
1477 if (request_irq(AU1000_USB_DEV_SUS_INT, req_sus_intr, SA_INTERRUPT,
1478 "USBdev sus", &usbdev)) {
1479 err("Can't get device suspend intr");
1480 ret = -ENXIO;
1481 goto out;
1482 }
1483
1484 /* Request EP0 DMA and IRQ */
1485 if ((ep0->indma = request_au1000_dma(ep_dma_id[0].id,
1486 ep_dma_id[0].str,
1487 dma_done_ep0_intr,
1488 SA_INTERRUPT,
1489 &usbdev)) < 0) {
1490 err("Can't get %s DMA", ep_dma_id[0].str);
1491 ret = -ENXIO;
1492 goto out;
1493 }
1494 if ((ep0->outdma = request_au1000_dma(ep_dma_id[1].id,
1495 ep_dma_id[1].str,
1496 NULL, 0, NULL)) < 0) {
1497 err("Can't get %s DMA", ep_dma_id[1].str);
1498 ret = -ENXIO;
1499 goto out;
1500 }
1501
1502 // Flush the ep0 buffers and FIFOs
1503 endpoint_flush(ep0);
1504 // start packet reception on ep0
1505 kickstart_receive_packet(ep0);
1506
1507 /* Request DMA and IRQ for the other endpoints */
1508 for (i = 2; i < 6; i++) {
1509 endpoint_t *ep = &usbdev.ep[i];
1510 if (!ep->active)
1511 continue;
1512
1513 // Flush the endpoint buffers and FIFOs
1514 endpoint_flush(ep);
1515
1516 if (ep->direction == USB_DIR_IN) {
1517 ep->indma =
1518 request_au1000_dma(ep_dma_id[ep->address].id,
1519 ep_dma_id[ep->address].str,
1520 dma_done_ep_intr,
1521 SA_INTERRUPT,
1522 &usbdev);
1523 if (ep->indma < 0) {
1524 err("Can't get %s DMA",
1525 ep_dma_id[ep->address].str);
1526 ret = -ENXIO;
1527 goto out;
1528 }
1529 } else {
1530 ep->outdma =
1531 request_au1000_dma(ep_dma_id[ep->address].id,
1532 ep_dma_id[ep->address].str,
1533 NULL, 0, NULL);
1534 if (ep->outdma < 0) {
1535 err("Can't get %s DMA",
1536 ep_dma_id[ep->address].str);
1537 ret = -ENXIO;
1538 goto out;
1539 }
1540
1541 // start packet reception on OUT endpoint
1542 kickstart_receive_packet(ep);
1543 }
1544 }
1545
1546 out:
1547 if (ret)
1548 usbdev_exit();
1549 return ret;
1550 }
1551
1552 EXPORT_SYMBOL(usbdev_init);
1553 EXPORT_SYMBOL(usbdev_exit);
1554 EXPORT_SYMBOL(usbdev_alloc_packet);
1555 EXPORT_SYMBOL(usbdev_receive_packet);
1556 EXPORT_SYMBOL(usbdev_send_packet);
1557 EXPORT_SYMBOL(usbdev_get_byte_count);