usb: chipidea: split the driver code into units
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / usb / chipidea / udc.c
1 /*
2 * udc.c - ChipIdea UDC driver
3 *
4 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
5 *
6 * Author: David Lopo
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dmapool.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/init.h>
18 #include <linux/platform_device.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/irq.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/usb/ch9.h>
27 #include <linux/usb/gadget.h>
28 #include <linux/usb/otg.h>
29 #include <linux/usb/chipidea.h>
30
31 #include "ci.h"
32 #include "udc.h"
33 #include "bits.h"
34 #include "debug.h"
35
/* control endpoint description */
/* EP0 OUT: control-type endpoint, fixed CTRL_PAYLOAD_MAX packet size */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
46
/* EP0 IN: control-type endpoint, fixed CTRL_PAYLOAD_MAX packet size */
static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
56
/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction (0 = RX, !0 = TX)
 *
 * RX endpoints occupy the low 16 bits of the ENDPT* registers, TX
 * endpoints the high 16 bits.
 *
 * This function returns bit number
 */
static inline int hw_ep_bit(int num, int dir)
{
	int offset = 0;

	if (dir)
		offset = 16;

	return offset + num;
}
68
69 static inline int ep_to_bit(struct ci13xxx *udc, int n)
70 {
71 int fill = 16 - udc->hw_ep_max / 2;
72
73 if (n >= udc->hw_ep_max / 2)
74 n += fill;
75
76 return n;
77 }
78
/**
 * hw_device_state: enables/disables interrupts & starts/stops device (execute
 *                  without interruption)
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * This function returns an error code
 */
static int hw_device_state(struct ci13xxx *udc, u32 dma)
{
	if (dma) {
		/* point the controller at the endpoint list before running */
		hw_write(udc, OP_ENDPTLISTADDR, ~0, dma);
		/* interrupt, error, port change, reset, sleep/suspend */
		hw_write(udc, OP_USBINTR, ~0,
			 USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
		/* RS: run/stop bit — start the controller */
		hw_write(udc, OP_USBCMD, USBCMD_RS, USBCMD_RS);
	} else {
		/* stop first, then mask all interrupts */
		hw_write(udc, OP_USBCMD, USBCMD_RS, 0);
		hw_write(udc, OP_USBINTR, ~0, 0);
	}
	return 0;
}
100
/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_flush(struct ci13xxx *udc, int num, int dir)
{
	int n = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(udc, OP_ENDPTFLUSH, BIT(n), BIT(n));
		/* hardware clears the flush bit when it is done */
		while (hw_read(udc, OP_ENDPTFLUSH, BIT(n)))
			cpu_relax();
	/* retry while the endpoint is still primed (ENDPTSTAT bit set) */
	} while (hw_read(udc, OP_ENDPTSTAT, BIT(n)));

	return 0;
}
121
122 /**
123 * hw_ep_disable: disables endpoint (execute without interruption)
124 * @num: endpoint number
125 * @dir: endpoint direction
126 *
127 * This function returns an error code
128 */
129 static int hw_ep_disable(struct ci13xxx *udc, int num, int dir)
130 {
131 hw_ep_flush(udc, num, dir);
132 hw_write(udc, OP_ENDPTCTRL + num,
133 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
134 return 0;
135 }
136
137 /**
138 * hw_ep_enable: enables endpoint (execute without interruption)
139 * @num: endpoint number
140 * @dir: endpoint direction
141 * @type: endpoint type
142 *
143 * This function returns an error code
144 */
145 static int hw_ep_enable(struct ci13xxx *udc, int num, int dir, int type)
146 {
147 u32 mask, data;
148
149 if (dir) {
150 mask = ENDPTCTRL_TXT; /* type */
151 data = type << ffs_nr(mask);
152
153 mask |= ENDPTCTRL_TXS; /* unstall */
154 mask |= ENDPTCTRL_TXR; /* reset data toggle */
155 data |= ENDPTCTRL_TXR;
156 mask |= ENDPTCTRL_TXE; /* enable */
157 data |= ENDPTCTRL_TXE;
158 } else {
159 mask = ENDPTCTRL_RXT; /* type */
160 data = type << ffs_nr(mask);
161
162 mask |= ENDPTCTRL_RXS; /* unstall */
163 mask |= ENDPTCTRL_RXR; /* reset data toggle */
164 data |= ENDPTCTRL_RXR;
165 mask |= ENDPTCTRL_RXE; /* enable */
166 data |= ENDPTCTRL_RXE;
167 }
168 hw_write(udc, OP_ENDPTCTRL + num, mask, data);
169 return 0;
170 }
171
172 /**
173 * hw_ep_get_halt: return endpoint halt status
174 * @num: endpoint number
175 * @dir: endpoint direction
176 *
177 * This function returns 1 if endpoint halted
178 */
179 static int hw_ep_get_halt(struct ci13xxx *udc, int num, int dir)
180 {
181 u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
182
183 return hw_read(udc, OP_ENDPTCTRL + num, mask) ? 1 : 0;
184 }
185
186 /**
187 * hw_test_and_clear_setup_status: test & clear setup status (execute without
188 * interruption)
189 * @n: endpoint number
190 *
191 * This function returns setup status
192 */
193 static int hw_test_and_clear_setup_status(struct ci13xxx *udc, int n)
194 {
195 n = ep_to_bit(udc, n);
196 return hw_test_and_clear(udc, OP_ENDPTSETUPSTAT, BIT(n));
197 }
198
/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code
 */
static int hw_ep_prime(struct ci13xxx *udc, int num, int dir, int is_ctrl)
{
	int n = hw_ep_bit(num, dir);

	/* abort if a new setup packet arrived for this control endpoint */
	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(udc, OP_ENDPTPRIME, BIT(n), BIT(n));

	/* hardware clears the prime bit once it has taken the request */
	while (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
		cpu_relax();
	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according with manual but it doesn't work */
	return 0;
}
224
/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 *                 without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 * @value: true => stall, false => unstall
 *
 * This function returns an error code
 */
static int hw_ep_set_halt(struct ci13xxx *udc, int num, int dir, int value)
{
	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_write(udc, reg, mask_xs|mask_xr,
			 value ? mask_xs : mask_xr);
	/* retry until the stall bit reads back with the requested value */
	} while (value != hw_ep_get_halt(udc, num, dir));

	return 0;
}
251
252 /**
253 * hw_is_port_high_speed: test if port is high speed
254 *
255 * This function returns true if high speed port
256 */
257 static int hw_port_is_high_speed(struct ci13xxx *udc)
258 {
259 return udc->hw_bank.lpm ? hw_read(udc, OP_DEVLC, DEVLC_PSPD) :
260 hw_read(udc, OP_PORTSC, PORTSC_HSP);
261 }
262
/**
 * hw_read_intr_enable: returns interrupt enable register
 *
 * This function returns register data
 */
static u32 hw_read_intr_enable(struct ci13xxx *udc)
{
	return hw_read(udc, OP_USBINTR, ~0);
}
272
/**
 * hw_read_intr_status: returns interrupt status register
 *
 * This function returns register data
 */
static u32 hw_read_intr_status(struct ci13xxx *udc)
{
	return hw_read(udc, OP_USBSTS, ~0);
}
282
283 /**
284 * hw_test_and_clear_complete: test & clear complete status (execute without
285 * interruption)
286 * @n: endpoint number
287 *
288 * This function returns complete status
289 */
290 static int hw_test_and_clear_complete(struct ci13xxx *udc, int n)
291 {
292 n = ep_to_bit(udc, n);
293 return hw_test_and_clear(udc, OP_ENDPTCOMPLETE, BIT(n));
294 }
295
296 /**
297 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
298 * without interruption)
299 *
300 * This function returns active interrutps
301 */
302 static u32 hw_test_and_clear_intr_active(struct ci13xxx *udc)
303 {
304 u32 reg = hw_read_intr_status(udc) & hw_read_intr_enable(udc);
305
306 hw_write(udc, OP_USBSTS, ~0, reg);
307 return reg;
308 }
309
/**
 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 *                                interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_clear_setup_guard(struct ci13xxx *udc)
{
	/* SUTW: setup tripwire — clears to detect a setup-packet race */
	return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, 0);
}
320
/**
 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 *                              interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_set_setup_guard(struct ci13xxx *udc)
{
	/* SUTW: setup tripwire — set before copying the setup packet */
	return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}
331
/**
 * hw_usb_set_address: configures USB address (execute without interruption)
 * @value: new USB address
 *
 * This function explicitly sets the address, without the "USBADRA" (advance)
 * feature, which is not supported by older versions of the controller.
 */
static void hw_usb_set_address(struct ci13xxx *udc, u8 value)
{
	hw_write(udc, OP_DEVICEADDR, DEVICEADDR_USBADR,
		 value << ffs_nr(DEVICEADDR_USBADR));
}
344
/**
 * hw_usb_reset: restart device after a bus reset (execute without
 *               interruption)
 *
 * This function returns an error code
 */
static int hw_usb_reset(struct ci13xxx *udc)
{
	hw_usb_set_address(udc, 0);

	/* ESS flushes only at end?!? */
	hw_write(udc, OP_ENDPTFLUSH, ~0, ~0);

	/* clear setup token semaphores */
	/* NOTE(review): mask 0 makes this hw_write a no-op — confirm whether
	 * a full write-1-to-clear of ENDPTSETUPSTAT was intended */
	hw_write(udc, OP_ENDPTSETUPSTAT, 0, 0);

	/* clear complete status */
	/* NOTE(review): same mask-0 no-op pattern as above */
	hw_write(udc, OP_ENDPTCOMPLETE, 0, 0);

	/* wait until all bits cleared */
	while (hw_read(udc, OP_ENDPTPRIME, ~0))
		udelay(10); /* not RTOS friendly */

	/* reset all endpoints ? */

	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */

	return 0;
}
375
376 /******************************************************************************
377 * UTIL block
378 *****************************************************************************/
379 /**
380 * _usb_addr: calculates endpoint address from direction & number
381 * @ep: endpoint
382 */
383 static inline u8 _usb_addr(struct ci13xxx_ep *ep)
384 {
385 return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
386 }
387
388 /**
389 * _hardware_queue: configures a request at hardware level
390 * @gadget: gadget
391 * @mEp: endpoint
392 *
393 * This function returns an error code
394 */
395 static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
396 {
397 struct ci13xxx *udc = mEp->udc;
398 unsigned i;
399 int ret = 0;
400 unsigned length = mReq->req.length;
401
402 /* don't queue twice */
403 if (mReq->req.status == -EALREADY)
404 return -EALREADY;
405
406 mReq->req.status = -EALREADY;
407 if (length && mReq->req.dma == DMA_ADDR_INVALID) {
408 mReq->req.dma = \
409 dma_map_single(mEp->device, mReq->req.buf,
410 length, mEp->dir ? DMA_TO_DEVICE :
411 DMA_FROM_DEVICE);
412 if (mReq->req.dma == 0)
413 return -ENOMEM;
414
415 mReq->map = 1;
416 }
417
418 if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
419 mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
420 &mReq->zdma);
421 if (mReq->zptr == NULL) {
422 if (mReq->map) {
423 dma_unmap_single(mEp->device, mReq->req.dma,
424 length, mEp->dir ? DMA_TO_DEVICE :
425 DMA_FROM_DEVICE);
426 mReq->req.dma = DMA_ADDR_INVALID;
427 mReq->map = 0;
428 }
429 return -ENOMEM;
430 }
431 memset(mReq->zptr, 0, sizeof(*mReq->zptr));
432 mReq->zptr->next = TD_TERMINATE;
433 mReq->zptr->token = TD_STATUS_ACTIVE;
434 if (!mReq->req.no_interrupt)
435 mReq->zptr->token |= TD_IOC;
436 }
437 /*
438 * TD configuration
439 * TODO - handle requests which spawns into several TDs
440 */
441 memset(mReq->ptr, 0, sizeof(*mReq->ptr));
442 mReq->ptr->token = length << ffs_nr(TD_TOTAL_BYTES);
443 mReq->ptr->token &= TD_TOTAL_BYTES;
444 mReq->ptr->token |= TD_STATUS_ACTIVE;
445 if (mReq->zptr) {
446 mReq->ptr->next = mReq->zdma;
447 } else {
448 mReq->ptr->next = TD_TERMINATE;
449 if (!mReq->req.no_interrupt)
450 mReq->ptr->token |= TD_IOC;
451 }
452 mReq->ptr->page[0] = mReq->req.dma;
453 for (i = 1; i < 5; i++)
454 mReq->ptr->page[i] =
455 (mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
456
457 if (!list_empty(&mEp->qh.queue)) {
458 struct ci13xxx_req *mReqPrev;
459 int n = hw_ep_bit(mEp->num, mEp->dir);
460 int tmp_stat;
461
462 mReqPrev = list_entry(mEp->qh.queue.prev,
463 struct ci13xxx_req, queue);
464 if (mReqPrev->zptr)
465 mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
466 else
467 mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
468 wmb();
469 if (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
470 goto done;
471 do {
472 hw_write(udc, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
473 tmp_stat = hw_read(udc, OP_ENDPTSTAT, BIT(n));
474 } while (!hw_read(udc, OP_USBCMD, USBCMD_ATDTW));
475 hw_write(udc, OP_USBCMD, USBCMD_ATDTW, 0);
476 if (tmp_stat)
477 goto done;
478 }
479
480 /* QH configuration */
481 mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */
482 mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
483 mEp->qh.ptr->cap |= QH_ZLT;
484
485 wmb(); /* synchronize before ep prime */
486
487 ret = hw_ep_prime(udc, mEp->num, mEp->dir,
488 mEp->type == USB_ENDPOINT_XFER_CONTROL);
489 done:
490 return ret;
491 }
492
493 /**
494 * _hardware_dequeue: handles a request at hardware level
495 * @gadget: gadget
496 * @mEp: endpoint
497 *
498 * This function returns an error code
499 */
500 static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
501 {
502 if (mReq->req.status != -EALREADY)
503 return -EINVAL;
504
505 if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
506 return -EBUSY;
507
508 if (mReq->zptr) {
509 if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
510 return -EBUSY;
511 dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
512 mReq->zptr = NULL;
513 }
514
515 mReq->req.status = 0;
516
517 if (mReq->map) {
518 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
519 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
520 mReq->req.dma = DMA_ADDR_INVALID;
521 mReq->map = 0;
522 }
523
524 mReq->req.status = mReq->ptr->token & TD_STATUS;
525 if ((TD_STATUS_HALTED & mReq->req.status) != 0)
526 mReq->req.status = -1;
527 else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
528 mReq->req.status = -1;
529 else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
530 mReq->req.status = -1;
531
532 mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
533 mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
534 mReq->req.actual = mReq->req.length - mReq->req.actual;
535 mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
536
537 return mReq->req.actual;
538 }
539
/**
 * _ep_nuke: dequeues all endpoint requests
 * @mEp: endpoint
 *
 * Flushes the endpoint fifo and completes every queued request with
 * -ESHUTDOWN.
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int _ep_nuke(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	if (mEp == NULL)
		return -EINVAL;

	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);

	while (!list_empty(&mEp->qh.queue)) {

		/* pop oldest request */
		struct ci13xxx_req *mReq = \
			list_entry(mEp->qh.queue.next,
				   struct ci13xxx_req, queue);
		list_del_init(&mReq->queue);
		mReq->req.status = -ESHUTDOWN;

		/* completion callbacks run with the lock dropped */
		if (mReq->req.complete != NULL) {
			spin_unlock(mEp->lock);
			mReq->req.complete(&mEp->ep, &mReq->req);
			spin_lock(mEp->lock);
		}
	}
	return 0;
}
573
/**
 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
 * @gadget: gadget
 *
 * This function returns an error code
 */
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
	struct usb_ep *ep;
	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
	unsigned long flags;

	if (gadget == NULL)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->remote_wakeup = 0;
	udc->suspended = 0;
	spin_unlock_irqrestore(&udc->lock, flags);

	/* flush all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_fifo_flush(ep);
	}
	/* ep0 is not part of the gadget ep list — flush it explicitly */
	usb_ep_fifo_flush(&udc->ep0out->ep);
	usb_ep_fifo_flush(&udc->ep0in->ep);

	/* notify the function driver before tearing the endpoints down */
	if (udc->driver)
		udc->driver->disconnect(gadget);

	/* make sure to disable all endpoints */
	gadget_for_each_ep(ep, gadget) {
		usb_ep_disable(ep);
	}

	/* release the pre-allocated ep0 status-stage request */
	if (udc->status != NULL) {
		usb_ep_free_request(&udc->ep0in->ep, udc->status);
		udc->status = NULL;
	}

	return 0;
}
617
618 /******************************************************************************
619 * ISR block
620 *****************************************************************************/
621 /**
622 * isr_reset_handler: USB reset interrupt handler
623 * @udc: UDC device
624 *
625 * This function resets USB engine after a bus reset occurred
626 */
627 static void isr_reset_handler(struct ci13xxx *udc)
628 __releases(udc->lock)
629 __acquires(udc->lock)
630 {
631 int retval;
632
633 dbg_event(0xFF, "BUS RST", 0);
634
635 spin_unlock(&udc->lock);
636 retval = _gadget_stop_activity(&udc->gadget);
637 if (retval)
638 goto done;
639
640 retval = hw_usb_reset(udc);
641 if (retval)
642 goto done;
643
644 udc->status = usb_ep_alloc_request(&udc->ep0in->ep, GFP_ATOMIC);
645 if (udc->status == NULL)
646 retval = -ENOMEM;
647
648 spin_lock(&udc->lock);
649
650 done:
651 if (retval)
652 dev_err(udc->dev, "error: %i\n", retval);
653 }
654
655 /**
656 * isr_get_status_complete: get_status request complete function
657 * @ep: endpoint
658 * @req: request handled
659 *
660 * Caller must release lock
661 */
662 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
663 {
664 if (ep == NULL || req == NULL)
665 return;
666
667 kfree(req->buf);
668 usb_ep_free_request(ep, req);
669 }
670
/**
 * isr_get_status_response: get_status request response
 * @udc: udc struct
 * @setup: setup request packet
 *
 * Allocates a 2-byte request on ep0in, fills it with the device or
 * endpoint status and queues it; isr_get_status_complete() frees it.
 *
 * This function returns an error code
 */
static int isr_get_status_response(struct ci13xxx *udc,
				   struct usb_ctrlrequest *setup)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_ep *mEp = udc->ep0in;
	struct usb_request *req = NULL;
	gfp_t gfp_flags = GFP_ATOMIC;
	int dir, num, retval;

	if (mEp == NULL || setup == NULL)
		return -EINVAL;

	/* allocation may sleep/trace — drop the lock around it */
	spin_unlock(mEp->lock);
	req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
	spin_lock(mEp->lock);
	if (req == NULL)
		return -ENOMEM;

	req->complete = isr_get_status_complete;
	req->length   = 2;
	req->buf      = kzalloc(req->length, gfp_flags);
	if (req->buf == NULL) {
		retval = -ENOMEM;
		goto err_free_req;
	}

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* Assume that device is bus powered for now. */
		*(u16 *)req->buf = udc->remote_wakeup << 1;
		retval = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK) \
		   == USB_RECIP_ENDPOINT) {
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		*(u16 *)req->buf = hw_ep_get_halt(udc, num, dir);
	}
	/* else do nothing; reserved for future use */

	spin_unlock(mEp->lock);
	retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
	spin_lock(mEp->lock);
	if (retval)
		goto err_free_buf;

	return 0;

 err_free_buf:
	kfree(req->buf);
 err_free_req:
	spin_unlock(mEp->lock);
	usb_ep_free_request(&mEp->ep, req);
	spin_lock(mEp->lock);
	return retval;
}
734
/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep: endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx *udc = req->context;
	unsigned long flags;

	/* a deferred SET_ADDRESS takes effect after the status stage */
	/* NOTE(review): setaddr/address are accessed outside udc->lock here —
	 * confirm there is no race with the setup dispatcher */
	if (udc->setaddr) {
		hw_usb_set_address(udc, udc->address);
		udc->setaddr = false;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->test_mode)
		hw_port_test_set(udc, udc->test_mode);
	spin_unlock_irqrestore(&udc->lock, flags);
}
759
/**
 * isr_setup_status_phase: queues the status phase of a setup transation
 * @udc: udc struct
 *
 * This function returns an error code
 */
static int isr_setup_status_phase(struct ci13xxx *udc)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	int retval;
	struct ci13xxx_ep *mEp;

	/* the status stage runs opposite to the data stage direction */
	mEp = (udc->ep0_dir == TX) ? udc->ep0out : udc->ep0in;
	udc->status->context = udc;
	udc->status->complete = isr_setup_status_complete;

	spin_unlock(mEp->lock);
	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
	spin_lock(mEp->lock);

	return retval;
}
783
/**
 * isr_tr_complete_low: transaction complete low level handler
 * @mEp: endpoint
 *
 * Retires every finished request on the endpoint queue and invokes its
 * completion callback with the lock dropped.
 *
 * This function returns an error code
 * Caller must hold lock
 */
static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_req *mReq, *mReqTemp;
	struct ci13xxx_ep *mEpTemp = mEp;
	/* always assigned on the first iteration: queue is non-empty here */
	int uninitialized_var(retval);

	if (list_empty(&mEp->qh.queue))
		return -EINVAL;

	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
				 queue) {
		retval = _hardware_dequeue(mEp, mReq);
		if (retval < 0)
			break;
		list_del_init(&mReq->queue);
		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
		if (mReq->req.complete != NULL) {
			spin_unlock(mEp->lock);
			/* control data stages complete on ep0in */
			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
			    mReq->req.length)
				mEpTemp = mEp->udc->ep0in;
			mReq->req.complete(&mEpTemp->ep, &mReq->req);
			spin_lock(mEp->lock);
		}
	}

	/* -EBUSY only means the next TD is still in flight */
	if (retval == -EBUSY)
		retval = 0;
	if (retval < 0)
		dbg_event(_usb_addr(mEp), "DONE", retval);

	return retval;
}
826
/**
 * isr_tr_complete_handler: transaction complete interrupt handler
 * @udc: UDC descriptor
 *
 * Walks every configured endpoint: first retires completed transfers,
 * then — for ep0 only — reads any pending setup packet and dispatches
 * the standard requests handled in hardware/driver (CLEAR_FEATURE,
 * GET_STATUS, SET_ADDRESS, SET_FEATURE); everything else is delegated
 * to the gadget driver's setup() callback.
 *
 * This function handles traffic events
 */
static void isr_tr_complete_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
	unsigned i;
	u8 tmode = 0;

	for (i = 0; i < udc->hw_ep_max; i++) {
		struct ci13xxx_ep *mEp  = &udc->ci13xxx_ep[i];
		int type, num, dir, err = -EINVAL;
		struct usb_ctrlrequest req;

		if (mEp->ep.desc == NULL)
			continue;   /* not configured */

		/* retire completed TDs; ep0 may then need a status stage */
		if (hw_test_and_clear_complete(udc, i)) {
			err = isr_tr_complete_low(mEp);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(udc);
				if (err < 0) {
					dbg_event(_usb_addr(mEp),
						  "ERROR", err);
					spin_unlock(&udc->lock);
					if (usb_ep_set_halt(&mEp->ep))
						dev_err(udc->dev,
							"error: ep_set_halt\n");
					spin_lock(&udc->lock);
				}
			}
		}

		/* the rest of the loop handles setup packets on ep0 only */
		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
		    !hw_test_and_clear_setup_status(udc, i))
			continue;

		if (i != 0) {
			dev_warn(udc->dev, "ctrl traffic at endpoint %d\n", i);
			continue;
		}

		/*
		 * Flush data and handshake transactions of previous
		 * setup packet.
		 */
		_ep_nuke(udc->ep0out);
		_ep_nuke(udc->ep0in);

		/* read_setup_packet: SUTW guard detects a concurrent setup */
		do {
			hw_test_and_set_setup_guard(udc);
			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
		} while (!hw_test_and_clear_setup_guard(udc));

		type = req.bRequestType;

		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

		dbg_setup(_usb_addr(mEp), &req);

		switch (req.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num  = le16_to_cpu(req.wIndex);
				dir  = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				/* TX endpoints live in the upper half of
				 * the ci13xxx_ep array */
				if (dir) /* TX */
					num += udc->hw_ep_max/2;
				/* a wedged endpoint stays halted */
				if (!udc->ci13xxx_ep[num].wedge) {
					spin_unlock(&udc->lock);
					err = usb_ep_clear_halt(
						&udc->ci13xxx_ep[num].ep);
					spin_lock(&udc->lock);
					if (err)
						break;
				}
				err = isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
				   le16_to_cpu(req.wValue) ==
				   USB_DEVICE_REMOTE_WAKEUP) {
				if (req.wLength != 0)
					break;
				udc->remote_wakeup = 0;
				err = isr_setup_status_phase(udc);
			} else {
				goto delegate;
			}
			break;
		case USB_REQ_GET_STATUS:
			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 2 ||
			    le16_to_cpu(req.wValue)  != 0)
				break;
			err = isr_get_status_response(udc, &req);
			break;
		case USB_REQ_SET_ADDRESS:
			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 0 ||
			    le16_to_cpu(req.wIndex)  != 0)
				break;
			/* deferred: applied in isr_setup_status_complete()
			 * after the status stage finishes */
			udc->address = (u8)le16_to_cpu(req.wValue);
			udc->setaddr = true;
			err = isr_setup_status_phase(udc);
			break;
		case USB_REQ_SET_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
			    le16_to_cpu(req.wValue) ==
			    USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num  = le16_to_cpu(req.wIndex);
				dir  = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += udc->hw_ep_max/2;

				spin_unlock(&udc->lock);
				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
				spin_lock(&udc->lock);
				if (!err)
					isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
				if (req.wLength != 0)
					break;
				switch (le16_to_cpu(req.wValue)) {
				case USB_DEVICE_REMOTE_WAKEUP:
					udc->remote_wakeup = 1;
					err = isr_setup_status_phase(udc);
					break;
				case USB_DEVICE_TEST_MODE:
					/* test selector in high byte of wIndex;
					 * applied after the status stage */
					tmode = le16_to_cpu(req.wIndex) >> 8;
					switch (tmode) {
					case TEST_J:
					case TEST_K:
					case TEST_SE0_NAK:
					case TEST_PACKET:
					case TEST_FORCE_EN:
						udc->test_mode = tmode;
						err = isr_setup_status_phase(
							udc);
						break;
					default:
						break;
					}
					/* NOTE(review): no break here — control
					 * falls through to "default: goto
					 * delegate" below, re-delegating the
					 * request after the status phase was
					 * queued; confirm this is intended */
				default:
					goto delegate;
				}
			} else {
				goto delegate;
			}
			break;
		default:
delegate:
			if (req.wLength == 0)   /* no data phase */
				udc->ep0_dir = TX;

			spin_unlock(&udc->lock);
			err = udc->driver->setup(&udc->gadget, &req);
			spin_lock(&udc->lock);
			break;
		}

		if (err < 0) {
			dbg_event(_usb_addr(mEp), "ERROR", err);

			spin_unlock(&udc->lock);
			if (usb_ep_set_halt(&mEp->ep))
				dev_err(udc->dev, "error: ep_set_halt\n");
			spin_lock(&udc->lock);
		}
	}
}
1013
1014 /******************************************************************************
1015 * ENDPT block
1016 *****************************************************************************/
/**
 * ep_enable: configure endpoint, making it usable
 *
 * Check usb_ep_enable() at "usb_gadget.h" for details
 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should enable ctrl endpts */

	mEp->ep.desc = desc;

	if (!list_empty(&mEp->qh.queue))
		dev_warn(mEp->udc->dev, "enabling a non-empty endpoint!\n");

	/* cache direction/number/type from the descriptor */
	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
	mEp->num  = usb_endpoint_num(desc);
	mEp->type = usb_endpoint_type(desc);

	mEp->ep.maxpacket = usb_endpoint_maxp(desc);

	dbg_event(_usb_addr(mEp), "ENABLE", 0);

	/* build the queue head capability field */
	mEp->qh.ptr->cap = 0;

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
		/* IOS: interrupt on setup packet */
		mEp->qh.ptr->cap |=  QH_IOS;
	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
		mEp->qh.ptr->cap &= ~QH_MULT;
	else
		mEp->qh.ptr->cap &= ~QH_ZLT;

	mEp->qh.ptr->cap |=
		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */

	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (mEp->num)
		retval |= hw_ep_enable(mEp->udc, mEp->num, mEp->dir, mEp->type);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
1072
/**
 * ep_disable: endpoint is no longer usable
 *
 * Check usb_ep_disable() at "usb_gadget.h" for details
 */
static int ep_disable(struct usb_ep *ep)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL)
		return -EINVAL;
	else if (mEp->ep.desc == NULL)
		return -EBUSY;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should disable ctrl endpts */

	/*
	 * For control endpoints the loop below runs twice, once per
	 * direction; for all others it runs exactly once.
	 */
	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "DISABLE", 0);

		retval |= _ep_nuke(mEp);
		retval |= hw_ep_disable(mEp->udc, mEp->num, mEp->dir);

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	mEp->ep.desc = NULL;

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
1110
1111 /**
1112 * ep_alloc_request: allocate a request object to use with this endpoint
1113 *
1114 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1115 */
1116 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1117 {
1118 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1119 struct ci13xxx_req *mReq = NULL;
1120
1121 if (ep == NULL)
1122 return NULL;
1123
1124 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
1125 if (mReq != NULL) {
1126 INIT_LIST_HEAD(&mReq->queue);
1127 mReq->req.dma = DMA_ADDR_INVALID;
1128
1129 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
1130 &mReq->dma);
1131 if (mReq->ptr == NULL) {
1132 kfree(mReq);
1133 mReq = NULL;
1134 }
1135 }
1136
1137 dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
1138
1139 return (mReq == NULL) ? NULL : &mReq->req;
1140 }
1141
/**
 * ep_free_request: frees a request object
 *
 * Check usb_ep_free_request() at "usb_gadget.h" for details
 */
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	if (ep == NULL || req == NULL) {
		return;
	} else if (!list_empty(&mReq->queue)) {
		/* refuse to free a request still on the endpoint queue */
		dev_err(mEp->udc->dev, "freeing queued request\n");
		return;
	}

	spin_lock_irqsave(mEp->lock, flags);

	/* return the TD to the pool before freeing the wrapper */
	if (mReq->ptr)
		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
	kfree(mReq);

	dbg_event(_usb_addr(mEp), "FREE", 0);

	spin_unlock_irqrestore(mEp->lock, flags);
}
1170
/**
 * ep_queue: queues (submits) an I/O request to an endpoint
 *
 * Check usb_ep_queue()* at usb_gadget.h" for details
 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	struct ci13xxx *udc = mEp->udc;
	int retval = 0;
	unsigned long flags;

	if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
		/* a control data stage goes on the endpoint matching the
		 * current ep0 direction, not necessarily on 'ep' */
		if (req->length)
			mEp = (udc->ep0_dir == RX) ?
				udc->ep0out : udc->ep0in;
		if (!list_empty(&mEp->qh.queue)) {
			_ep_nuke(mEp);
			retval = -EOVERFLOW;
			dev_warn(mEp->udc->dev, "endpoint ctrl %X nuked\n",
				 _usb_addr(mEp));
		}
	}

	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&mReq->queue)) {
		retval = -EBUSY;
		dev_err(mEp->udc->dev, "request already in queue\n");
		goto done;
	}

	/* single-TD limit: 4 pages (see TODO in _hardware_enqueue) */
	if (req->length > 4 * CI13XXX_PAGE_SIZE) {
		req->length = 4 * CI13XXX_PAGE_SIZE;
		retval = -EMSGSIZE;
		dev_warn(mEp->udc->dev, "request length truncated\n");
	}

	dbg_queue(_usb_addr(mEp), req, retval);

	/* push request */
	mReq->req.status = -EINPROGRESS;
	mReq->req.actual = 0;

	retval = _hardware_enqueue(mEp, mReq);

	if (retval == -EALREADY) {
		dbg_event(_usb_addr(mEp), "QUEUE", retval);
		retval = 0;
	}
	if (!retval)
		list_add_tail(&mReq->queue, &mEp->qh.queue);

 done:
	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
1234
/**
 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
 *
 * Check usb_ep_dequeue() at "usb_gadget.h" for details
 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	/* only a request that is currently queued on an enabled endpoint
	 * (status -EALREADY, linked on both queues) can be cancelled */
	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
		mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
		list_empty(&mEp->qh.queue))
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);

	/* stop any ongoing hardware activity on this endpoint */
	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);

	/* pop request */
	list_del_init(&mReq->queue);
	/* undo the DMA mapping made at enqueue time, if any */
	if (mReq->map) {
		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->map = 0;
	}
	req->status = -ECONNRESET;

	/* the completion callback must run without the endpoint lock held;
	 * irqs stay disabled (flags are restored only at the end) */
	if (mReq->req.complete != NULL) {
		spin_unlock(mEp->lock);
		mReq->req.complete(&mEp->ep, &mReq->req);
		spin_lock(mEp->lock);
	}

	spin_unlock_irqrestore(mEp->lock, flags);
	return 0;
}
1276
/**
 * ep_set_halt: sets the endpoint halt feature
 *
 * Check usb_ep_set_halt() at "usb_gadget.h" for details
 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	if (ep == NULL || mEp->ep.desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

#ifndef STALL_IN
	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
	/* refuse to stall a bulk IN endpoint that still has queued requests */
	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
		!list_empty(&mEp->qh.queue)) {
		spin_unlock_irqrestore(mEp->lock, flags);
		return -EAGAIN;
	}
#endif

	/* for control endpoints the loop runs twice (dir is toggled below),
	 * halting both the IN and OUT halves; otherwise it runs once */
	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "HALT", value);
		retval |= hw_ep_set_halt(mEp->udc, mEp->num, mEp->dir, value);

		/* clearing the halt also clears a pending wedge */
		if (!value)
			mEp->wedge = 0;

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
1318
1319 /**
1320 * ep_set_wedge: sets the halt feature and ignores clear requests
1321 *
1322 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1323 */
1324 static int ep_set_wedge(struct usb_ep *ep)
1325 {
1326 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1327 unsigned long flags;
1328
1329 if (ep == NULL || mEp->ep.desc == NULL)
1330 return -EINVAL;
1331
1332 spin_lock_irqsave(mEp->lock, flags);
1333
1334 dbg_event(_usb_addr(mEp), "WEDGE", 0);
1335 mEp->wedge = 1;
1336
1337 spin_unlock_irqrestore(mEp->lock, flags);
1338
1339 return usb_ep_set_halt(ep);
1340 }
1341
1342 /**
1343 * ep_fifo_flush: flushes contents of a fifo
1344 *
1345 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1346 */
1347 static void ep_fifo_flush(struct usb_ep *ep)
1348 {
1349 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1350 unsigned long flags;
1351
1352 if (ep == NULL) {
1353 dev_err(mEp->udc->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
1354 return;
1355 }
1356
1357 spin_lock_irqsave(mEp->lock, flags);
1358
1359 dbg_event(_usb_addr(mEp), "FFLUSH", 0);
1360 hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
1361
1362 spin_unlock_irqrestore(mEp->lock, flags);
1363 }
1364
/**
 * Endpoint-specific part of the API to the USB controller hardware
 * Check "usb_gadget.h" for details
 */
static const struct usb_ep_ops usb_ep_ops = {
	/* per-endpoint entry points called by the gadget core */
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};
1380
1381 /******************************************************************************
1382 * GADGET block
1383 *****************************************************************************/
1384 static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
1385 {
1386 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1387 unsigned long flags;
1388 int gadget_ready = 0;
1389
1390 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
1391 return -EOPNOTSUPP;
1392
1393 spin_lock_irqsave(&udc->lock, flags);
1394 udc->vbus_active = is_active;
1395 if (udc->driver)
1396 gadget_ready = 1;
1397 spin_unlock_irqrestore(&udc->lock, flags);
1398
1399 if (gadget_ready) {
1400 if (is_active) {
1401 pm_runtime_get_sync(&_gadget->dev);
1402 hw_device_reset(udc);
1403 hw_device_state(udc, udc->ep0out->qh.dma);
1404 } else {
1405 hw_device_state(udc, 0);
1406 if (udc->udc_driver->notify_event)
1407 udc->udc_driver->notify_event(udc,
1408 CI13XXX_CONTROLLER_STOPPED_EVENT);
1409 _gadget_stop_activity(&udc->gadget);
1410 pm_runtime_put_sync(&_gadget->dev);
1411 }
1412 }
1413
1414 return 0;
1415 }
1416
1417 static int ci13xxx_wakeup(struct usb_gadget *_gadget)
1418 {
1419 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1420 unsigned long flags;
1421 int ret = 0;
1422
1423 spin_lock_irqsave(&udc->lock, flags);
1424 if (!udc->remote_wakeup) {
1425 ret = -EOPNOTSUPP;
1426 goto out;
1427 }
1428 if (!hw_read(udc, OP_PORTSC, PORTSC_SUSP)) {
1429 ret = -EINVAL;
1430 goto out;
1431 }
1432 hw_write(udc, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1433 out:
1434 spin_unlock_irqrestore(&udc->lock, flags);
1435 return ret;
1436 }
1437
1438 static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1439 {
1440 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1441
1442 if (udc->transceiver)
1443 return usb_phy_set_power(udc->transceiver, mA);
1444 return -ENOTSUPP;
1445 }
1446
/* forward declarations: defined below, referenced by the ops table */
static int ci13xxx_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver);
static int ci13xxx_stop(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver);
/**
 * Device operations part of the API to the USB controller hardware,
 * which don't involve endpoints (or i/o)
 * Check "usb_gadget.h" for details
 */
static const struct usb_gadget_ops usb_gadget_ops = {
	.vbus_session	= ci13xxx_vbus_session,
	.wakeup		= ci13xxx_wakeup,
	.vbus_draw	= ci13xxx_vbus_draw,
	.udc_start	= ci13xxx_start,
	.udc_stop	= ci13xxx_stop,
};
1463
1464 static int init_eps(struct ci13xxx *udc)
1465 {
1466 int retval = 0, i, j;
1467
1468 for (i = 0; i < udc->hw_ep_max/2; i++)
1469 for (j = RX; j <= TX; j++) {
1470 int k = i + j * udc->hw_ep_max/2;
1471 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
1472
1473 scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
1474 (j == TX) ? "in" : "out");
1475
1476 mEp->udc = udc;
1477 mEp->lock = &udc->lock;
1478 mEp->device = &udc->gadget.dev;
1479 mEp->td_pool = udc->td_pool;
1480
1481 mEp->ep.name = mEp->name;
1482 mEp->ep.ops = &usb_ep_ops;
1483 mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
1484
1485 INIT_LIST_HEAD(&mEp->qh.queue);
1486 mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
1487 &mEp->qh.dma);
1488 if (mEp->qh.ptr == NULL)
1489 retval = -ENOMEM;
1490 else
1491 memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
1492
1493 /*
1494 * set up shorthands for ep0 out and in endpoints,
1495 * don't add to gadget's ep_list
1496 */
1497 if (i == 0) {
1498 if (j == RX)
1499 udc->ep0out = mEp;
1500 else
1501 udc->ep0in = mEp;
1502
1503 continue;
1504 }
1505
1506 list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
1507 }
1508
1509 return retval;
1510 }
1511
1512 /**
1513 * ci13xxx_start: register a gadget driver
1514 * @gadget: our gadget
1515 * @driver: the driver being registered
1516 *
1517 * Interrupts are enabled here.
1518 */
1519 static int ci13xxx_start(struct usb_gadget *gadget,
1520 struct usb_gadget_driver *driver)
1521 {
1522 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1523 unsigned long flags;
1524 int retval = -ENOMEM;
1525
1526 if (driver->disconnect == NULL)
1527 return -EINVAL;
1528
1529
1530 udc->ep0out->ep.desc = &ctrl_endpt_out_desc;
1531 retval = usb_ep_enable(&udc->ep0out->ep);
1532 if (retval)
1533 return retval;
1534
1535 udc->ep0in->ep.desc = &ctrl_endpt_in_desc;
1536 retval = usb_ep_enable(&udc->ep0in->ep);
1537 if (retval)
1538 return retval;
1539 spin_lock_irqsave(&udc->lock, flags);
1540
1541 udc->driver = driver;
1542 pm_runtime_get_sync(&udc->gadget.dev);
1543 if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
1544 if (udc->vbus_active) {
1545 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
1546 hw_device_reset(udc);
1547 } else {
1548 pm_runtime_put_sync(&udc->gadget.dev);
1549 goto done;
1550 }
1551 }
1552
1553 retval = hw_device_state(udc, udc->ep0out->qh.dma);
1554 if (retval)
1555 pm_runtime_put_sync(&udc->gadget.dev);
1556
1557 done:
1558 spin_unlock_irqrestore(&udc->lock, flags);
1559 return retval;
1560 }
1561
/**
 * ci13xxx_stop: unregister a gadget driver
 */
static int ci13xxx_stop(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* only touch the hardware if it was started: either pull-up does
	 * not depend on VBUS, or VBUS is currently present */
	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
			udc->vbus_active) {
		hw_device_state(udc, 0);
		if (udc->udc_driver->notify_event)
			udc->udc_driver->notify_event(udc,
			CI13XXX_CONTROLLER_STOPPED_EVENT);
		udc->driver = NULL;
		/* drop the lock: _gadget_stop_activity() presumably completes
		 * outstanding requests, which must not run under udc->lock */
		spin_unlock_irqrestore(&udc->lock, flags);
		_gadget_stop_activity(&udc->gadget);
		spin_lock_irqsave(&udc->lock, flags);
		pm_runtime_put(&udc->gadget.dev);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1590
1591 /******************************************************************************
1592 * BUS block
1593 *****************************************************************************/
/**
 * udc_irq: global interrupt handler
 *
 * This function returns IRQ_HANDLED if the IRQ has been handled
 * It locks access to registers
 */
irqreturn_t udc_irq(int irq, void *data)
{
	struct ci13xxx *udc = data;
	irqreturn_t retval;
	u32 intr;

	if (udc == NULL)
		return IRQ_HANDLED;

	spin_lock(&udc->lock);

	/* on shared-register controllers, ignore IRQs raised in host mode */
	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
		if (hw_read(udc, OP_USBMODE, USBMODE_CM) !=
				USBMODE_CM_DEVICE) {
			spin_unlock(&udc->lock);
			return IRQ_NONE;
		}
	}
	/* read and acknowledge all pending interrupt causes in one shot */
	intr = hw_test_and_clear_intr_active(udc);
	dbg_interrupt(intr);

	if (intr) {
		/* order defines priority - do NOT change it */
		if (USBi_URI & intr)	/* bus reset */
			isr_reset_handler(udc);

		if (USBi_PCI & intr) {	/* port change */
			udc->gadget.speed = hw_port_is_high_speed(udc) ?
				USB_SPEED_HIGH : USB_SPEED_FULL;
			if (udc->suspended && udc->driver->resume) {
				/* gadget callbacks run without udc->lock */
				spin_unlock(&udc->lock);
				udc->driver->resume(&udc->gadget);
				spin_lock(&udc->lock);
				udc->suspended = 0;
			}
		}

		if (USBi_UI & intr)	/* transfer completion */
			isr_tr_complete_handler(udc);

		if (USBi_SLI & intr) {	/* bus suspend */
			if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
					udc->driver->suspend) {
				udc->suspended = 1;
				spin_unlock(&udc->lock);
				udc->driver->suspend(&udc->gadget);
				spin_lock(&udc->lock);
			}
		}
		retval = IRQ_HANDLED;
	} else {
		retval = IRQ_NONE;
	}
	spin_unlock(&udc->lock);

	return retval;
}
1657
/**
 * udc_release: driver release function
 * @dev: device
 *
 * Currently does nothing
 */
static void udc_release(struct device *dev)
{
	/* intentionally empty: the gadget device is embedded in struct
	 * ci13xxx, which udc_remove() frees; the device core only requires
	 * a non-NULL release callback */
}
1667
/**
 * udc_probe: parent probe must call this to initialize UDC
 * @dev: parent device
 * @regs: registers base address
 * @name: driver name
 *
 * This function returns an error code
 * No interrupts active, the IRQ has not been requested yet
 * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
 */
int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
		void __iomem *regs, struct ci13xxx **_udc)
{
	struct ci13xxx *udc;
	int retval = 0;

	if (dev == NULL || regs == NULL || driver == NULL ||
			driver->name == NULL)
		return -EINVAL;

	/* zeroed allocation: ep array, pointers and flags all start NULL/0 */
	udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
	if (udc == NULL)
		return -ENOMEM;

	spin_lock_init(&udc->lock);
	udc->regs = regs;
	udc->udc_driver = driver;

	udc->gadget.ops = &usb_gadget_ops;
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->gadget.max_speed = USB_SPEED_HIGH;
	udc->gadget.is_otg = 0;
	udc->gadget.name = driver->name;

	INIT_LIST_HEAD(&udc->gadget.ep_list);

	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.dma_mask = dev->dma_mask;
	udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
	udc->gadget.dev.parent = dev;
	udc->gadget.dev.release = udc_release;

	udc->dev = dev;

	/* alloc resources */
	udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
				       sizeof(struct ci13xxx_qh),
				       64, CI13XXX_PAGE_SIZE);
	if (udc->qh_pool == NULL) {
		retval = -ENOMEM;
		goto free_udc;
	}

	udc->td_pool = dma_pool_create("ci13xxx_td", dev,
				       sizeof(struct ci13xxx_td),
				       64, CI13XXX_PAGE_SIZE);
	if (udc->td_pool == NULL) {
		retval = -ENOMEM;
		goto free_qh_pool;
	}

	retval = hw_device_init(udc, regs, driver->capoffset);
	if (retval < 0)
		goto free_pools;

	retval = init_eps(udc);
	if (retval)
		goto free_pools;

	udc->gadget.ep0 = &udc->ep0in->ep;

	udc->transceiver = usb_get_transceiver();

	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
		if (udc->transceiver == NULL) {
			retval = -ENODEV;
			goto free_pools;
		}
	}

	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
		retval = hw_device_reset(udc);
		if (retval)
			goto put_transceiver;
	}

	retval = device_register(&udc->gadget.dev);
	if (retval) {
		put_device(&udc->gadget.dev);
		goto put_transceiver;
	}

	retval = dbg_create_files(&udc->gadget.dev);
	if (retval)
		goto unreg_device;

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					    &udc->gadget);
		if (retval)
			goto remove_dbg;
	}

	retval = usb_add_gadget_udc(dev, &udc->gadget);
	if (retval)
		goto remove_trans;

	pm_runtime_no_callbacks(&udc->gadget.dev);
	pm_runtime_enable(&udc->gadget.dev);

	*_udc = udc;
	return retval;

remove_trans:
	if (udc->transceiver) {
		/* NOTE(review): this re-binds the gadget instead of
		 * detaching it — otg_set_peripheral(..., NULL) looks
		 * intended here; confirm.  Also note this path falls
		 * through to put_transceiver below, so usb_put_transceiver()
		 * is called twice — likely a refcount bug; confirm. */
		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
		usb_put_transceiver(udc->transceiver);
	}

	dev_err(dev, "error = %i\n", retval);
remove_dbg:
	dbg_remove_files(&udc->gadget.dev);
unreg_device:
	device_unregister(&udc->gadget.dev);
put_transceiver:
	if (udc->transceiver)
		usb_put_transceiver(udc->transceiver);
free_pools:
	/* NOTE(review): queue heads allocated by init_eps() are not freed
	 * here before the pool is destroyed — see init_eps() */
	dma_pool_destroy(udc->td_pool);
free_qh_pool:
	dma_pool_destroy(udc->qh_pool);
free_udc:
	kfree(udc);
	*_udc = NULL;
	return retval;
}
1804
1805 /**
1806 * udc_remove: parent remove must call this to remove UDC
1807 *
1808 * No interrupts active, the IRQ has been released
1809 */
1810 void udc_remove(struct ci13xxx *udc)
1811 {
1812 int i;
1813
1814 if (udc == NULL)
1815 return;
1816
1817 usb_del_gadget_udc(&udc->gadget);
1818
1819 for (i = 0; i < udc->hw_ep_max; i++) {
1820 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
1821
1822 dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
1823 }
1824
1825 dma_pool_destroy(udc->td_pool);
1826 dma_pool_destroy(udc->qh_pool);
1827
1828 if (udc->transceiver) {
1829 otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
1830 usb_put_transceiver(udc->transceiver);
1831 }
1832 dbg_remove_files(&udc->gadget.dev);
1833 device_unregister(&udc->gadget.dev);
1834
1835 kfree(udc->hw_bank.regmap);
1836 kfree(udc);
1837 }