usb: chipidea: udc: only clear active and halted bits in qhead
drivers/usb/chipidea/udc.c
1 /*
2 * udc.c - ChipIdea UDC driver
3 *
4 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
5 *
6 * Author: David Lopo
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dmapool.h>
16 #include <linux/err.h>
17 #include <linux/irqreturn.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/usb/otg.h>
24 #include <linux/usb/chipidea.h>
25
26 #include "ci.h"
27 #include "udc.h"
28 #include "bits.h"
29 #include "debug.h"
30
31 /* control endpoint description */
32 static const struct usb_endpoint_descriptor
33 ctrl_endpt_out_desc = {
34 .bLength = USB_DT_ENDPOINT_SIZE,
35 .bDescriptorType = USB_DT_ENDPOINT,
36
37 .bEndpointAddress = USB_DIR_OUT,
38 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
39 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
40 };
41
42 static const struct usb_endpoint_descriptor
43 ctrl_endpt_in_desc = {
44 .bLength = USB_DT_ENDPOINT_SIZE,
45 .bDescriptorType = USB_DT_ENDPOINT,
46
47 .bEndpointAddress = USB_DIR_IN,
48 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
49 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
50 };
51
52 /**
53 * hw_ep_bit: calculates the bit number
54 * @num: endpoint number
55 * @dir: endpoint direction
56 *
57 * This function returns the bit number
58 */
59 static inline int hw_ep_bit(int num, int dir)
60 {
61 return num + (dir ? 16 : 0);
62 }
63
64 static inline int ep_to_bit(struct ci13xxx *ci, int n)
65 {
66 int fill = 16 - ci->hw_ep_max / 2;
67
68 if (n >= ci->hw_ep_max / 2)
69 n += fill;
70
71 return n;
72 }
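/*
 * Note: the ENDPT* registers keep RX bits in positions 0..15 and TX bits in
 * positions 16..31, while ci13xxx_ep[] lists all OUT endpoints first and all
 * IN endpoints second.  For example, with hw_ep_max == 8 (4 OUT + 4 IN),
 * fill = 16 - 4 = 12, so array index 5 (ep1in) maps to bit 17 (TX, ep 1).
 */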
73
74 /**
75 * hw_device_state: enables/disables interrupts (execute without interruption)
76 * @dma: 0 => disable, !0 => enable and set dma engine
77 *
78 * This function returns an error code
79 */
80 static int hw_device_state(struct ci13xxx *ci, u32 dma)
81 {
82 if (dma) {
83 hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
84 /* interrupt, error, port change, reset, sleep/suspend */
85 hw_write(ci, OP_USBINTR, ~0,
86 USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
87 } else {
88 hw_write(ci, OP_USBINTR, ~0, 0);
89 }
90 return 0;
91 }
92
93 /**
94 * hw_ep_flush: flush endpoint fifo (execute without interruption)
95 * @num: endpoint number
96 * @dir: endpoint direction
97 *
98 * This function returns an error code
99 */
100 static int hw_ep_flush(struct ci13xxx *ci, int num, int dir)
101 {
102 int n = hw_ep_bit(num, dir);
103
104 do {
105 /* flush any pending transfer */
106 hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
107 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
108 cpu_relax();
109 } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
110
111 return 0;
112 }
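/*
 * Note on the loop above: the flush is retried for as long as ENDPTSTAT still
 * reports the endpoint as primed, e.g. if a prime races with the flush and
 * leaves a transfer pending after ENDPTFLUSH has cleared.
 */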
113
114 /**
115 * hw_ep_disable: disables endpoint (execute without interruption)
116 * @num: endpoint number
117 * @dir: endpoint direction
118 *
119 * This function returns an error code
120 */
121 static int hw_ep_disable(struct ci13xxx *ci, int num, int dir)
122 {
123 hw_ep_flush(ci, num, dir);
124 hw_write(ci, OP_ENDPTCTRL + num,
125 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
126 return 0;
127 }
128
129 /**
130 * hw_ep_enable: enables endpoint (execute without interruption)
131 * @num: endpoint number
132 * @dir: endpoint direction
133 * @type: endpoint type
134 *
135 * This function returns an error code
136 */
137 static int hw_ep_enable(struct ci13xxx *ci, int num, int dir, int type)
138 {
139 u32 mask, data;
140
141 if (dir) {
142 mask = ENDPTCTRL_TXT; /* type */
143 data = type << __ffs(mask);
144
145 mask |= ENDPTCTRL_TXS; /* unstall */
146 mask |= ENDPTCTRL_TXR; /* reset data toggle */
147 data |= ENDPTCTRL_TXR;
148 mask |= ENDPTCTRL_TXE; /* enable */
149 data |= ENDPTCTRL_TXE;
150 } else {
151 mask = ENDPTCTRL_RXT; /* type */
152 data = type << __ffs(mask);
153
154 mask |= ENDPTCTRL_RXS; /* unstall */
155 mask |= ENDPTCTRL_RXR; /* reset data toggle */
156 data |= ENDPTCTRL_RXR;
157 mask |= ENDPTCTRL_RXE; /* enable */
158 data |= ENDPTCTRL_RXE;
159 }
160 hw_write(ci, OP_ENDPTCTRL + num, mask, data);
161 return 0;
162 }
163
164 /**
165 * hw_ep_get_halt: return endpoint halt status
166 * @num: endpoint number
167 * @dir: endpoint direction
168 *
169 * This function returns 1 if endpoint halted
170 */
171 static int hw_ep_get_halt(struct ci13xxx *ci, int num, int dir)
172 {
173 u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
174
175 return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
176 }
177
178 /**
179 * hw_test_and_clear_setup_status: test & clear setup status (execute without
180 * interruption)
181 * @n: endpoint number
182 *
183 * This function returns setup status
184 */
185 static int hw_test_and_clear_setup_status(struct ci13xxx *ci, int n)
186 {
187 n = ep_to_bit(ci, n);
188 return hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(n));
189 }
190
191 /**
192 * hw_ep_prime: primes endpoint (execute without interruption)
193 * @num: endpoint number
194 * @dir: endpoint direction
195 * @is_ctrl: true if control endpoint
196 *
197 * This function returns an error code
198 */
199 static int hw_ep_prime(struct ci13xxx *ci, int num, int dir, int is_ctrl)
200 {
201 int n = hw_ep_bit(num, dir);
202
203 if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
204 return -EAGAIN;
205
206 hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
207
208 while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
209 cpu_relax();
210 if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
211 return -EAGAIN;
212
213 /* status should be tested according to the manual, but it doesn't work */
214 return 0;
215 }
216
217 /**
218 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
219 * without interruption)
220 * @num: endpoint number
221 * @dir: endpoint direction
222 * @value: true => stall, false => unstall
223 *
224 * This function returns an error code
225 */
226 static int hw_ep_set_halt(struct ci13xxx *ci, int num, int dir, int value)
227 {
228 if (value != 0 && value != 1)
229 return -EINVAL;
230
231 do {
232 enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
233 u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
234 u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
235
236 /* data toggle - reserved for EP0 but it's in ESS */
237 hw_write(ci, reg, mask_xs|mask_xr,
238 value ? mask_xs : mask_xr);
239 } while (value != hw_ep_get_halt(ci, num, dir));
240
241 return 0;
242 }
243
244 /**
245 * hw_port_is_high_speed: test if port is high speed
246 *
247 * This function returns true if high speed port
248 */
249 static int hw_port_is_high_speed(struct ci13xxx *ci)
250 {
251 return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
252 hw_read(ci, OP_PORTSC, PORTSC_HSP);
253 }
254
255 /**
256 * hw_read_intr_enable: returns interrupt enable register
257 *
258 * This function returns register data
259 */
260 static u32 hw_read_intr_enable(struct ci13xxx *ci)
261 {
262 return hw_read(ci, OP_USBINTR, ~0);
263 }
264
265 /**
266 * hw_read_intr_status: returns interrupt status register
267 *
268 * This function returns register data
269 */
270 static u32 hw_read_intr_status(struct ci13xxx *ci)
271 {
272 return hw_read(ci, OP_USBSTS, ~0);
273 }
274
275 /**
276 * hw_test_and_clear_complete: test & clear complete status (execute without
277 * interruption)
278 * @n: endpoint number
279 *
280 * This function returns complete status
281 */
282 static int hw_test_and_clear_complete(struct ci13xxx *ci, int n)
283 {
284 n = ep_to_bit(ci, n);
285 return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
286 }
287
288 /**
289 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
290 * without interruption)
291 *
292 * This function returns active interrupts
293 */
294 static u32 hw_test_and_clear_intr_active(struct ci13xxx *ci)
295 {
296 u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
297
298 hw_write(ci, OP_USBSTS, ~0, reg);
299 return reg;
300 }
301
302 /**
303 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
304 * interruption)
305 *
306 * This function returns guard value
307 */
308 static int hw_test_and_clear_setup_guard(struct ci13xxx *ci)
309 {
310 return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
311 }
312
313 /**
314 * hw_test_and_set_setup_guard: test & set setup guard (execute without
315 * interruption)
316 *
317 * This function returns guard value
318 */
319 static int hw_test_and_set_setup_guard(struct ci13xxx *ci)
320 {
321 return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
322 }
323
324 /**
325 * hw_usb_set_address: configures USB address (execute without interruption)
326 * @value: new USB address
327 *
328 * This function explicitly sets the address, without the "USBADRA" (advance)
329 * feature, which is not supported by older versions of the controller.
330 */
331 static void hw_usb_set_address(struct ci13xxx *ci, u8 value)
332 {
333 hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
334 value << __ffs(DEVICEADDR_USBADR));
335 }
336
337 /**
338 * hw_usb_reset: restart device after a bus reset (execute without
339 * interruption)
340 *
341 * This function returns an error code
342 */
343 static int hw_usb_reset(struct ci13xxx *ci)
344 {
345 hw_usb_set_address(ci, 0);
346
347 /* ESS flushes only at end?!? */
348 hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);
349
350 /* clear setup token semaphores */
351 hw_write(ci, OP_ENDPTSETUPSTAT, 0, 0);
352
353 /* clear complete status */
354 hw_write(ci, OP_ENDPTCOMPLETE, 0, 0);
355
356 /* wait until all bits cleared */
357 while (hw_read(ci, OP_ENDPTPRIME, ~0))
358 udelay(10); /* not RTOS friendly */
359
360 /* reset all endpoints ? */
361
362 /* reset internal status and wait for further instructions
363 no need to verify the port reset status (ESS does it) */
364
365 return 0;
366 }
367
368 /******************************************************************************
369 * UTIL block
370 *****************************************************************************/
371 /**
372 * _usb_addr: calculates endpoint address from direction & number
373 * @ep: endpoint
374 */
375 static inline u8 _usb_addr(struct ci13xxx_ep *ep)
376 {
377 return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
378 }
379
380 /**
381 * _hardware_enqueue: configures a request at hardware level
382 * @mEp: endpoint
383 * @mReq: request
384 *
385 * This function returns an error code
386 */
387 static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
388 {
389 struct ci13xxx *ci = mEp->ci;
390 unsigned i;
391 int ret = 0;
392 unsigned length = mReq->req.length;
393
394 /* don't queue twice */
395 if (mReq->req.status == -EALREADY)
396 return -EALREADY;
397
398 mReq->req.status = -EALREADY;
399
400 if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
401 mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
402 &mReq->zdma);
403 if (mReq->zptr == NULL)
404 return -ENOMEM;
405
406 memset(mReq->zptr, 0, sizeof(*mReq->zptr));
407 mReq->zptr->next = cpu_to_le32(TD_TERMINATE);
408 mReq->zptr->token = cpu_to_le32(TD_STATUS_ACTIVE);
409 if (!mReq->req.no_interrupt)
410 mReq->zptr->token |= cpu_to_le32(TD_IOC);
411 }
412 ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
413 if (ret)
414 return ret;
415
416 /*
417 * TD configuration
418 * TODO - handle requests which span several TDs
419 */
420 memset(mReq->ptr, 0, sizeof(*mReq->ptr));
421 mReq->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
422 mReq->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
423 mReq->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
424 if (mReq->zptr) {
425 mReq->ptr->next = cpu_to_le32(mReq->zdma);
426 } else {
427 mReq->ptr->next = cpu_to_le32(TD_TERMINATE);
428 if (!mReq->req.no_interrupt)
429 mReq->ptr->token |= cpu_to_le32(TD_IOC);
430 }
431 mReq->ptr->page[0] = cpu_to_le32(mReq->req.dma);
432 for (i = 1; i < 5; i++) {
433 u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE;
434 page &= ~TD_RESERVED_MASK;
435 mReq->ptr->page[i] = cpu_to_le32(page);
436 }
437
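	/*
	 * Append to a non-empty queue: link the new TD after the last queued
	 * TD, then use the ATDTW ("add dTD tripwire") semaphore to sample
	 * ENDPTSTAT atomically.  Roughly: set ATDTW, read ENDPTSTAT, and retry
	 * while hardware has cleared ATDTW; if the endpoint was still primed,
	 * the controller picks up the new TD on its own and no re-prime is
	 * needed, otherwise fall through and prime the queue head below.
	 */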
438 if (!list_empty(&mEp->qh.queue)) {
439 struct ci13xxx_req *mReqPrev;
440 int n = hw_ep_bit(mEp->num, mEp->dir);
441 int tmp_stat;
442 u32 next = mReq->dma & TD_ADDR_MASK;
443
444 mReqPrev = list_entry(mEp->qh.queue.prev,
445 struct ci13xxx_req, queue);
446 if (mReqPrev->zptr)
447 mReqPrev->zptr->next = cpu_to_le32(next);
448 else
449 mReqPrev->ptr->next = cpu_to_le32(next);
450 wmb();
451 if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
452 goto done;
453 do {
454 hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
455 tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
456 } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
457 hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
458 if (tmp_stat)
459 goto done;
460 }
461
462 /* QH configuration */
463 mEp->qh.ptr->td.next = cpu_to_le32(mReq->dma); /* TERMINATE = 0 */
464 mEp->qh.ptr->td.token &=
465 cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
466 mEp->qh.ptr->cap |= cpu_to_le32(QH_ZLT);
467
468 wmb(); /* synchronize before ep prime */
469
470 ret = hw_ep_prime(ci, mEp->num, mEp->dir,
471 mEp->type == USB_ENDPOINT_XFER_CONTROL);
472 done:
473 return ret;
474 }
475
476 /**
477 * _hardware_dequeue: handles a request at hardware level
478 * @mEp: endpoint
479 * @mReq: request
480 *
481 * This function returns the number of bytes transferred, or an error code
482 */
483 static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
484 {
485 if (mReq->req.status != -EALREADY)
486 return -EINVAL;
487
488 if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->ptr->token) != 0)
489 return -EBUSY;
490
491 if (mReq->zptr) {
492 if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->zptr->token) != 0)
493 return -EBUSY;
494 dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
495 mReq->zptr = NULL;
496 }
497
498 mReq->req.status = 0;
499
500 usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir);
501
502 mReq->req.status = le32_to_cpu(mReq->ptr->token) & TD_STATUS;
503 if ((TD_STATUS_HALTED & mReq->req.status) != 0)
504 mReq->req.status = -1;
505 else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
506 mReq->req.status = -1;
507 else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
508 mReq->req.status = -1;
509
510 mReq->req.actual = le32_to_cpu(mReq->ptr->token) & TD_TOTAL_BYTES;
511 mReq->req.actual >>= __ffs(TD_TOTAL_BYTES);
512 mReq->req.actual = mReq->req.length - mReq->req.actual;
513 mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
514
515 return mReq->req.actual;
516 }
517
518 /**
519 * _ep_nuke: dequeues all endpoint requests
520 * @mEp: endpoint
521 *
522 * This function returns an error code
523 * Caller must hold lock
524 */
525 static int _ep_nuke(struct ci13xxx_ep *mEp)
526 __releases(mEp->lock)
527 __acquires(mEp->lock)
528 {
529 if (mEp == NULL)
530 return -EINVAL;
531
532 hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
533
534 while (!list_empty(&mEp->qh.queue)) {
535
536 /* pop oldest request */
537 struct ci13xxx_req *mReq = \
538 list_entry(mEp->qh.queue.next,
539 struct ci13xxx_req, queue);
540 list_del_init(&mReq->queue);
541 mReq->req.status = -ESHUTDOWN;
542
543 if (mReq->req.complete != NULL) {
544 spin_unlock(mEp->lock);
545 mReq->req.complete(&mEp->ep, &mReq->req);
546 spin_lock(mEp->lock);
547 }
548 }
549 return 0;
550 }
551
552 /**
553 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
554 * @gadget: gadget
555 *
556 * This function returns an error code
557 */
558 static int _gadget_stop_activity(struct usb_gadget *gadget)
559 {
560 struct usb_ep *ep;
561 struct ci13xxx *ci = container_of(gadget, struct ci13xxx, gadget);
562 unsigned long flags;
563
564 spin_lock_irqsave(&ci->lock, flags);
565 ci->gadget.speed = USB_SPEED_UNKNOWN;
566 ci->remote_wakeup = 0;
567 ci->suspended = 0;
568 spin_unlock_irqrestore(&ci->lock, flags);
569
570 /* flush all endpoints */
571 gadget_for_each_ep(ep, gadget) {
572 usb_ep_fifo_flush(ep);
573 }
574 usb_ep_fifo_flush(&ci->ep0out->ep);
575 usb_ep_fifo_flush(&ci->ep0in->ep);
576
577 if (ci->driver)
578 ci->driver->disconnect(gadget);
579
580 /* make sure to disable all endpoints */
581 gadget_for_each_ep(ep, gadget) {
582 usb_ep_disable(ep);
583 }
584
585 if (ci->status != NULL) {
586 usb_ep_free_request(&ci->ep0in->ep, ci->status);
587 ci->status = NULL;
588 }
589
590 return 0;
591 }
592
593 /******************************************************************************
594 * ISR block
595 *****************************************************************************/
596 /**
597 * isr_reset_handler: USB reset interrupt handler
598 * @ci: UDC device
599 *
600 * This function resets USB engine after a bus reset occurred
601 */
602 static void isr_reset_handler(struct ci13xxx *ci)
603 __releases(ci->lock)
604 __acquires(ci->lock)
605 {
606 int retval;
607
608 spin_unlock(&ci->lock);
609 retval = _gadget_stop_activity(&ci->gadget);
610 if (retval)
611 goto done;
612
613 retval = hw_usb_reset(ci);
614 if (retval)
615 goto done;
616
617 ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
618 if (ci->status == NULL)
619 retval = -ENOMEM;
620
621 done:
622 spin_lock(&ci->lock);
623
624 if (retval)
625 dev_err(ci->dev, "error: %i\n", retval);
626 }
627
628 /**
629 * isr_get_status_complete: get_status request complete function
630 * @ep: endpoint
631 * @req: request handled
632 *
633 * Caller must release lock
634 */
635 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
636 {
637 if (ep == NULL || req == NULL)
638 return;
639
640 kfree(req->buf);
641 usb_ep_free_request(ep, req);
642 }
643
644 /**
645 * isr_get_status_response: get_status request response
646 * @ci: ci struct
647 * @setup: setup request packet
648 *
649 * This function returns an error code
650 */
651 static int isr_get_status_response(struct ci13xxx *ci,
652 struct usb_ctrlrequest *setup)
653 __releases(mEp->lock)
654 __acquires(mEp->lock)
655 {
656 struct ci13xxx_ep *mEp = ci->ep0in;
657 struct usb_request *req = NULL;
658 gfp_t gfp_flags = GFP_ATOMIC;
659 int dir, num, retval;
660
661 if (mEp == NULL || setup == NULL)
662 return -EINVAL;
663
664 spin_unlock(mEp->lock);
665 req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
666 spin_lock(mEp->lock);
667 if (req == NULL)
668 return -ENOMEM;
669
670 req->complete = isr_get_status_complete;
671 req->length = 2;
672 req->buf = kzalloc(req->length, gfp_flags);
673 if (req->buf == NULL) {
674 retval = -ENOMEM;
675 goto err_free_req;
676 }
677
678 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
679 /* Assume that device is bus powered for now. */
680 *(u16 *)req->buf = ci->remote_wakeup << 1;
681 retval = 0;
682 } else if ((setup->bRequestType & USB_RECIP_MASK) \
683 == USB_RECIP_ENDPOINT) {
684 dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
685 TX : RX;
686 num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
687 *(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
688 }
689 /* else do nothing; reserved for future use */
690
691 spin_unlock(mEp->lock);
692 retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
693 spin_lock(mEp->lock);
694 if (retval)
695 goto err_free_buf;
696
697 return 0;
698
699 err_free_buf:
700 kfree(req->buf);
701 err_free_req:
702 spin_unlock(mEp->lock);
703 usb_ep_free_request(&mEp->ep, req);
704 spin_lock(mEp->lock);
705 return retval;
706 }
707
708 /**
709 * isr_setup_status_complete: setup_status request complete function
710 * @ep: endpoint
711 * @req: request handled
712 *
713 * Caller must release lock. Put the port in test mode if test mode
714 * feature is selected.
715 */
716 static void
717 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
718 {
719 struct ci13xxx *ci = req->context;
720 unsigned long flags;
721
722 if (ci->setaddr) {
723 hw_usb_set_address(ci, ci->address);
724 ci->setaddr = false;
725 }
726
727 spin_lock_irqsave(&ci->lock, flags);
728 if (ci->test_mode)
729 hw_port_test_set(ci, ci->test_mode);
730 spin_unlock_irqrestore(&ci->lock, flags);
731 }
732
733 /**
734 * isr_setup_status_phase: queues the status phase of a setup transaction
735 * @ci: ci struct
736 *
737 * This function returns an error code
738 */
739 static int isr_setup_status_phase(struct ci13xxx *ci)
740 __releases(mEp->lock)
741 __acquires(mEp->lock)
742 {
743 int retval;
744 struct ci13xxx_ep *mEp;
745
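	/* the status stage runs in the opposite direction of the data stage */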
746 mEp = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
747 ci->status->context = ci;
748 ci->status->complete = isr_setup_status_complete;
749
750 spin_unlock(mEp->lock);
751 retval = usb_ep_queue(&mEp->ep, ci->status, GFP_ATOMIC);
752 spin_lock(mEp->lock);
753
754 return retval;
755 }
756
757 /**
758 * isr_tr_complete_low: transaction complete low level handler
759 * @mEp: endpoint
760 *
761 * This function returns an error code
762 * Caller must hold lock
763 */
764 static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
765 __releases(mEp->lock)
766 __acquires(mEp->lock)
767 {
768 struct ci13xxx_req *mReq, *mReqTemp;
769 struct ci13xxx_ep *mEpTemp = mEp;
770 int retval = 0;
771
772 list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
773 queue) {
774 retval = _hardware_dequeue(mEp, mReq);
775 if (retval < 0)
776 break;
777 list_del_init(&mReq->queue);
778 if (mReq->req.complete != NULL) {
779 spin_unlock(mEp->lock);
780 if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
781 mReq->req.length)
782 mEpTemp = mEp->ci->ep0in;
783 mReq->req.complete(&mEpTemp->ep, &mReq->req);
784 spin_lock(mEp->lock);
785 }
786 }
787
788 if (retval == -EBUSY)
789 retval = 0;
790
791 return retval;
792 }
793
794 /**
795 * isr_tr_complete_handler: transaction complete interrupt handler
796 * @ci: UDC descriptor
797 *
798 * This function handles traffic events
799 */
800 static void isr_tr_complete_handler(struct ci13xxx *ci)
801 __releases(ci->lock)
802 __acquires(ci->lock)
803 {
804 unsigned i;
805 u8 tmode = 0;
806
807 for (i = 0; i < ci->hw_ep_max; i++) {
808 struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
809 int type, num, dir, err = -EINVAL;
810 struct usb_ctrlrequest req;
811
812 if (mEp->ep.desc == NULL)
813 continue; /* not configured */
814
815 if (hw_test_and_clear_complete(ci, i)) {
816 err = isr_tr_complete_low(mEp);
817 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
818 if (err > 0) /* needs status phase */
819 err = isr_setup_status_phase(ci);
820 if (err < 0) {
821 spin_unlock(&ci->lock);
822 if (usb_ep_set_halt(&mEp->ep))
823 dev_err(ci->dev,
824 "error: ep_set_halt\n");
825 spin_lock(&ci->lock);
826 }
827 }
828 }
829
830 if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
831 !hw_test_and_clear_setup_status(ci, i))
832 continue;
833
834 if (i != 0) {
835 dev_warn(ci->dev, "ctrl traffic at endpoint %d\n", i);
836 continue;
837 }
838
839 /*
840 * Flush data and handshake transactions of previous
841 * setup packet.
842 */
843 _ep_nuke(ci->ep0out);
844 _ep_nuke(ci->ep0in);
845
846 /* read_setup_packet */
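		/*
		 * The SUTW (setup tripwire) guards the 8-byte copy: hardware
		 * clears the bit if a new setup packet arrives mid-copy, in
		 * which case the copy is retried with the fresh data.
		 */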
847 do {
848 hw_test_and_set_setup_guard(ci);
849 memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
850 } while (!hw_test_and_clear_setup_guard(ci));
851
852 type = req.bRequestType;
853
854 ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
855
856 switch (req.bRequest) {
857 case USB_REQ_CLEAR_FEATURE:
858 if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
859 le16_to_cpu(req.wValue) ==
860 USB_ENDPOINT_HALT) {
861 if (req.wLength != 0)
862 break;
863 num = le16_to_cpu(req.wIndex);
864 dir = num & USB_ENDPOINT_DIR_MASK;
865 num &= USB_ENDPOINT_NUMBER_MASK;
866 if (dir) /* TX */
867 num += ci->hw_ep_max/2;
868 if (!ci->ci13xxx_ep[num].wedge) {
869 spin_unlock(&ci->lock);
870 err = usb_ep_clear_halt(
871 &ci->ci13xxx_ep[num].ep);
872 spin_lock(&ci->lock);
873 if (err)
874 break;
875 }
876 err = isr_setup_status_phase(ci);
877 } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
878 le16_to_cpu(req.wValue) ==
879 USB_DEVICE_REMOTE_WAKEUP) {
880 if (req.wLength != 0)
881 break;
882 ci->remote_wakeup = 0;
883 err = isr_setup_status_phase(ci);
884 } else {
885 goto delegate;
886 }
887 break;
888 case USB_REQ_GET_STATUS:
889 if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
890 type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
891 type != (USB_DIR_IN|USB_RECIP_INTERFACE))
892 goto delegate;
893 if (le16_to_cpu(req.wLength) != 2 ||
894 le16_to_cpu(req.wValue) != 0)
895 break;
896 err = isr_get_status_response(ci, &req);
897 break;
898 case USB_REQ_SET_ADDRESS:
899 if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
900 goto delegate;
901 if (le16_to_cpu(req.wLength) != 0 ||
902 le16_to_cpu(req.wIndex) != 0)
903 break;
904 ci->address = (u8)le16_to_cpu(req.wValue);
905 ci->setaddr = true;
906 err = isr_setup_status_phase(ci);
907 break;
908 case USB_REQ_SET_FEATURE:
909 if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
910 le16_to_cpu(req.wValue) ==
911 USB_ENDPOINT_HALT) {
912 if (req.wLength != 0)
913 break;
914 num = le16_to_cpu(req.wIndex);
915 dir = num & USB_ENDPOINT_DIR_MASK;
916 num &= USB_ENDPOINT_NUMBER_MASK;
917 if (dir) /* TX */
918 num += ci->hw_ep_max/2;
919
920 spin_unlock(&ci->lock);
921 err = usb_ep_set_halt(&ci->ci13xxx_ep[num].ep);
922 spin_lock(&ci->lock);
923 if (!err)
924 isr_setup_status_phase(ci);
925 } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
926 if (req.wLength != 0)
927 break;
928 switch (le16_to_cpu(req.wValue)) {
929 case USB_DEVICE_REMOTE_WAKEUP:
930 ci->remote_wakeup = 1;
931 err = isr_setup_status_phase(ci);
932 break;
933 case USB_DEVICE_TEST_MODE:
934 tmode = le16_to_cpu(req.wIndex) >> 8;
935 switch (tmode) {
936 case TEST_J:
937 case TEST_K:
938 case TEST_SE0_NAK:
939 case TEST_PACKET:
940 case TEST_FORCE_EN:
941 ci->test_mode = tmode;
942 err = isr_setup_status_phase(
943 ci);
944 break;
945 default:
946 break;
947 }
948 default:
949 goto delegate;
950 }
951 } else {
952 goto delegate;
953 }
954 break;
955 default:
956 delegate:
957 if (req.wLength == 0) /* no data phase */
958 ci->ep0_dir = TX;
959
960 spin_unlock(&ci->lock);
961 err = ci->driver->setup(&ci->gadget, &req);
962 spin_lock(&ci->lock);
963 break;
964 }
965
966 if (err < 0) {
967 spin_unlock(&ci->lock);
968 if (usb_ep_set_halt(&mEp->ep))
969 dev_err(ci->dev, "error: ep_set_halt\n");
970 spin_lock(&ci->lock);
971 }
972 }
973 }
974
975 /******************************************************************************
976 * ENDPT block
977 *****************************************************************************/
978 /**
979 * ep_enable: configure endpoint, making it usable
980 *
981 * Check usb_ep_enable() at "usb_gadget.h" for details
982 */
983 static int ep_enable(struct usb_ep *ep,
984 const struct usb_endpoint_descriptor *desc)
985 {
986 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
987 int retval = 0;
988 unsigned long flags;
989
990 if (ep == NULL || desc == NULL)
991 return -EINVAL;
992
993 spin_lock_irqsave(mEp->lock, flags);
994
995 /* only internal SW should enable ctrl endpts */
996
997 mEp->ep.desc = desc;
998
999 if (!list_empty(&mEp->qh.queue))
1000 dev_warn(mEp->ci->dev, "enabling a non-empty endpoint!\n");
1001
1002 mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
1003 mEp->num = usb_endpoint_num(desc);
1004 mEp->type = usb_endpoint_type(desc);
1005
1006 mEp->ep.maxpacket = usb_endpoint_maxp(desc);
1007
1008 mEp->qh.ptr->cap = 0;
1009
1010 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1011 mEp->qh.ptr->cap |= cpu_to_le32(QH_IOS);
1012 else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
1013 mEp->qh.ptr->cap &= cpu_to_le32(~QH_MULT);
1014 else
1015 mEp->qh.ptr->cap &= cpu_to_le32(~QH_ZLT);
1016
1017 mEp->qh.ptr->cap |= cpu_to_le32((mEp->ep.maxpacket << __ffs(QH_MAX_PKT))
1018 & QH_MAX_PKT);
1019 mEp->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
1020
1021 /*
1022 * Enable endpoints in the HW other than ep0 as ep0
1023 * is always enabled
1024 */
1025 if (mEp->num)
1026 retval |= hw_ep_enable(mEp->ci, mEp->num, mEp->dir, mEp->type);
1027
1028 spin_unlock_irqrestore(mEp->lock, flags);
1029 return retval;
1030 }
1031
1032 /**
1033 * ep_disable: endpoint is no longer usable
1034 *
1035 * Check usb_ep_disable() at "usb_gadget.h" for details
1036 */
1037 static int ep_disable(struct usb_ep *ep)
1038 {
1039 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1040 int direction, retval = 0;
1041 unsigned long flags;
1042
1043 if (ep == NULL)
1044 return -EINVAL;
1045 else if (mEp->ep.desc == NULL)
1046 return -EBUSY;
1047
1048 spin_lock_irqsave(mEp->lock, flags);
1049
1050 /* only internal SW should disable ctrl endpts */
1051
1052 direction = mEp->dir;
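	/* a control endpoint covers both directions, so disable RX and TX */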
1053 do {
1054 retval |= _ep_nuke(mEp);
1055 retval |= hw_ep_disable(mEp->ci, mEp->num, mEp->dir);
1056
1057 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1058 mEp->dir = (mEp->dir == TX) ? RX : TX;
1059
1060 } while (mEp->dir != direction);
1061
1062 mEp->ep.desc = NULL;
1063
1064 spin_unlock_irqrestore(mEp->lock, flags);
1065 return retval;
1066 }
1067
1068 /**
1069 * ep_alloc_request: allocate a request object to use with this endpoint
1070 *
1071 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1072 */
1073 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1074 {
1075 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1076 struct ci13xxx_req *mReq = NULL;
1077
1078 if (ep == NULL)
1079 return NULL;
1080
1081 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
1082 if (mReq != NULL) {
1083 INIT_LIST_HEAD(&mReq->queue);
1084
1085 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
1086 &mReq->dma);
1087 if (mReq->ptr == NULL) {
1088 kfree(mReq);
1089 mReq = NULL;
1090 }
1091 }
1092
1093 return (mReq == NULL) ? NULL : &mReq->req;
1094 }
1095
1096 /**
1097 * ep_free_request: frees a request object
1098 *
1099 * Check usb_ep_free_request() at "usb_gadget.h" for details
1100 */
1101 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1102 {
1103 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1104 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1105 unsigned long flags;
1106
1107 if (ep == NULL || req == NULL) {
1108 return;
1109 } else if (!list_empty(&mReq->queue)) {
1110 dev_err(mEp->ci->dev, "freeing queued request\n");
1111 return;
1112 }
1113
1114 spin_lock_irqsave(mEp->lock, flags);
1115
1116 if (mReq->ptr)
1117 dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
1118 kfree(mReq);
1119
1120 spin_unlock_irqrestore(mEp->lock, flags);
1121 }
1122
1123 /**
1124 * ep_queue: queues (submits) an I/O request to an endpoint
1125 *
1126 * Check usb_ep_queue() at "usb_gadget.h" for details
1127 */
1128 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1129 gfp_t __maybe_unused gfp_flags)
1130 {
1131 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1132 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1133 struct ci13xxx *ci = mEp->ci;
1134 int retval = 0;
1135 unsigned long flags;
1136
1137 if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
1138 return -EINVAL;
1139
1140 spin_lock_irqsave(mEp->lock, flags);
1141
1142 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
1143 if (req->length)
1144 mEp = (ci->ep0_dir == RX) ?
1145 ci->ep0out : ci->ep0in;
1146 if (!list_empty(&mEp->qh.queue)) {
1147 _ep_nuke(mEp);
1148 retval = -EOVERFLOW;
1149 dev_warn(mEp->ci->dev, "endpoint ctrl %X nuked\n",
1150 _usb_addr(mEp));
1151 }
1152 }
1153
1154 /* first nuke then test link, e.g. previous status has not been sent */
1155 if (!list_empty(&mReq->queue)) {
1156 retval = -EBUSY;
1157 dev_err(mEp->ci->dev, "request already in queue\n");
1158 goto done;
1159 }
1160
1161 if (req->length > 4 * CI13XXX_PAGE_SIZE) {
1162 req->length = 4 * CI13XXX_PAGE_SIZE;
1163 retval = -EMSGSIZE;
1164 dev_warn(mEp->ci->dev, "request length truncated\n");
1165 }
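	/*
	 * A single TD holds only five page pointers, which guarantees room for
	 * 4 * CI13XXX_PAGE_SIZE of payload regardless of buffer alignment;
	 * larger requests would need the multi-TD handling noted in the TODO
	 * in _hardware_enqueue().
	 */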
1166
1167 /* push request */
1168 mReq->req.status = -EINPROGRESS;
1169 mReq->req.actual = 0;
1170
1171 retval = _hardware_enqueue(mEp, mReq);
1172
1173 if (retval == -EALREADY)
1174 retval = 0;
1175 if (!retval)
1176 list_add_tail(&mReq->queue, &mEp->qh.queue);
1177
1178 done:
1179 spin_unlock_irqrestore(mEp->lock, flags);
1180 return retval;
1181 }
1182
1183 /**
1184 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1185 *
1186 * Check usb_ep_dequeue() at "usb_gadget.h" for details
1187 */
1188 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1189 {
1190 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1191 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1192 unsigned long flags;
1193
1194 if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
1195 mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
1196 list_empty(&mEp->qh.queue))
1197 return -EINVAL;
1198
1199 spin_lock_irqsave(mEp->lock, flags);
1200
1201 hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
1202
1203 /* pop request */
1204 list_del_init(&mReq->queue);
1205
1206 usb_gadget_unmap_request(&mEp->ci->gadget, req, mEp->dir);
1207
1208 req->status = -ECONNRESET;
1209
1210 if (mReq->req.complete != NULL) {
1211 spin_unlock(mEp->lock);
1212 mReq->req.complete(&mEp->ep, &mReq->req);
1213 spin_lock(mEp->lock);
1214 }
1215
1216 spin_unlock_irqrestore(mEp->lock, flags);
1217 return 0;
1218 }
1219
1220 /**
1221 * ep_set_halt: sets the endpoint halt feature
1222 *
1223 * Check usb_ep_set_halt() at "usb_gadget.h" for details
1224 */
1225 static int ep_set_halt(struct usb_ep *ep, int value)
1226 {
1227 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1228 int direction, retval = 0;
1229 unsigned long flags;
1230
1231 if (ep == NULL || mEp->ep.desc == NULL)
1232 return -EINVAL;
1233
1234 spin_lock_irqsave(mEp->lock, flags);
1235
1236 #ifndef STALL_IN
1237 /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
1238 if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
1239 !list_empty(&mEp->qh.queue)) {
1240 spin_unlock_irqrestore(mEp->lock, flags);
1241 return -EAGAIN;
1242 }
1243 #endif
1244
1245 direction = mEp->dir;
1246 do {
1247 retval |= hw_ep_set_halt(mEp->ci, mEp->num, mEp->dir, value);
1248
1249 if (!value)
1250 mEp->wedge = 0;
1251
1252 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1253 mEp->dir = (mEp->dir == TX) ? RX : TX;
1254
1255 } while (mEp->dir != direction);
1256
1257 spin_unlock_irqrestore(mEp->lock, flags);
1258 return retval;
1259 }
1260
1261 /**
1262 * ep_set_wedge: sets the halt feature and ignores clear requests
1263 *
1264 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1265 */
1266 static int ep_set_wedge(struct usb_ep *ep)
1267 {
1268 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1269 unsigned long flags;
1270
1271 if (ep == NULL || mEp->ep.desc == NULL)
1272 return -EINVAL;
1273
1274 spin_lock_irqsave(mEp->lock, flags);
1275 mEp->wedge = 1;
1276 spin_unlock_irqrestore(mEp->lock, flags);
1277
1278 return usb_ep_set_halt(ep);
1279 }
1280
1281 /**
1282 * ep_fifo_flush: flushes contents of a fifo
1283 *
1284 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1285 */
1286 static void ep_fifo_flush(struct usb_ep *ep)
1287 {
1288 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1289 unsigned long flags;
1290
1291 if (ep == NULL) {
1292 dev_err(mEp->ci->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
1293 return;
1294 }
1295
1296 spin_lock_irqsave(mEp->lock, flags);
1297
1298 hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
1299
1300 spin_unlock_irqrestore(mEp->lock, flags);
1301 }
1302
1303 /**
1304 * Endpoint-specific part of the API to the USB controller hardware
1305 * Check "usb_gadget.h" for details
1306 */
1307 static const struct usb_ep_ops usb_ep_ops = {
1308 .enable = ep_enable,
1309 .disable = ep_disable,
1310 .alloc_request = ep_alloc_request,
1311 .free_request = ep_free_request,
1312 .queue = ep_queue,
1313 .dequeue = ep_dequeue,
1314 .set_halt = ep_set_halt,
1315 .set_wedge = ep_set_wedge,
1316 .fifo_flush = ep_fifo_flush,
1317 };
1318
1319 /******************************************************************************
1320 * GADGET block
1321 *****************************************************************************/
1322 static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
1323 {
1324 struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
1325 unsigned long flags;
1326 int gadget_ready = 0;
1327
1328 if (!(ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS))
1329 return -EOPNOTSUPP;
1330
1331 spin_lock_irqsave(&ci->lock, flags);
1332 ci->vbus_active = is_active;
1333 if (ci->driver)
1334 gadget_ready = 1;
1335 spin_unlock_irqrestore(&ci->lock, flags);
1336
1337 if (gadget_ready) {
1338 if (is_active) {
1339 pm_runtime_get_sync(&_gadget->dev);
1340 hw_device_reset(ci, USBMODE_CM_DC);
1341 hw_device_state(ci, ci->ep0out->qh.dma);
1342 } else {
1343 hw_device_state(ci, 0);
1344 if (ci->platdata->notify_event)
1345 ci->platdata->notify_event(ci,
1346 CI13XXX_CONTROLLER_STOPPED_EVENT);
1347 _gadget_stop_activity(&ci->gadget);
1348 pm_runtime_put_sync(&_gadget->dev);
1349 }
1350 }
1351
1352 return 0;
1353 }
1354
1355 static int ci13xxx_wakeup(struct usb_gadget *_gadget)
1356 {
1357 struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
1358 unsigned long flags;
1359 int ret = 0;
1360
1361 spin_lock_irqsave(&ci->lock, flags);
1362 if (!ci->remote_wakeup) {
1363 ret = -EOPNOTSUPP;
1364 goto out;
1365 }
1366 if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
1367 ret = -EINVAL;
1368 goto out;
1369 }
1370 hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1371 out:
1372 spin_unlock_irqrestore(&ci->lock, flags);
1373 return ret;
1374 }
1375
1376 static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1377 {
1378 struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
1379
1380 if (ci->transceiver)
1381 return usb_phy_set_power(ci->transceiver, mA);
1382 return -ENOTSUPP;
1383 }
1384
1385 /* Change Data+ pullup status
1386 * this function is used by usb_gadget_connect/disconnect
1387 */
1388 static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_on)
1389 {
1390 struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
1391
1392 if (is_on)
1393 hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
1394 else
1395 hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
1396
1397 return 0;
1398 }
1399
1400 static int ci13xxx_start(struct usb_gadget *gadget,
1401 struct usb_gadget_driver *driver);
1402 static int ci13xxx_stop(struct usb_gadget *gadget,
1403 struct usb_gadget_driver *driver);
1404 /**
1405 * Device operations part of the API to the USB controller hardware,
1406 * which don't involve endpoints (or i/o)
1407 * Check "usb_gadget.h" for details
1408 */
1409 static const struct usb_gadget_ops usb_gadget_ops = {
1410 .vbus_session = ci13xxx_vbus_session,
1411 .wakeup = ci13xxx_wakeup,
1412 .pullup = ci13xxx_pullup,
1413 .vbus_draw = ci13xxx_vbus_draw,
1414 .udc_start = ci13xxx_start,
1415 .udc_stop = ci13xxx_stop,
1416 };
1417
1418 static int init_eps(struct ci13xxx *ci)
1419 {
1420 int retval = 0, i, j;
1421
1422 for (i = 0; i < ci->hw_ep_max/2; i++)
1423 for (j = RX; j <= TX; j++) {
1424 int k = i + j * ci->hw_ep_max/2;
1425 struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[k];
1426
1427 scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
1428 (j == TX) ? "in" : "out");
1429
1430 mEp->ci = ci;
1431 mEp->lock = &ci->lock;
1432 mEp->td_pool = ci->td_pool;
1433
1434 mEp->ep.name = mEp->name;
1435 mEp->ep.ops = &usb_ep_ops;
1436 /*
1437 * for ep0: maxP defined in desc, for other
1438 * eps, maxP is set by epautoconfig() called
1439 * by gadget layer
1440 */
1441 mEp->ep.maxpacket = (unsigned short)~0;
1442
1443 INIT_LIST_HEAD(&mEp->qh.queue);
1444 mEp->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
1445 &mEp->qh.dma);
1446 if (mEp->qh.ptr == NULL)
1447 retval = -ENOMEM;
1448 else
1449 memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
1450
1451 /*
1452 * set up shorthands for ep0 out and in endpoints,
1453 * don't add to gadget's ep_list
1454 */
1455 if (i == 0) {
1456 if (j == RX)
1457 ci->ep0out = mEp;
1458 else
1459 ci->ep0in = mEp;
1460
1461 mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
1462 continue;
1463 }
1464
1465 list_add_tail(&mEp->ep.ep_list, &ci->gadget.ep_list);
1466 }
1467
1468 return retval;
1469 }
1470
1471 static void destroy_eps(struct ci13xxx *ci)
1472 {
1473 int i;
1474
1475 for (i = 0; i < ci->hw_ep_max; i++) {
1476 struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
1477
1478 dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma);
1479 }
1480 }
1481
1482 /**
1483 * ci13xxx_start: register a gadget driver
1484 * @gadget: our gadget
1485 * @driver: the driver being registered
1486 *
1487 * Interrupts are enabled here.
1488 */
1489 static int ci13xxx_start(struct usb_gadget *gadget,
1490 struct usb_gadget_driver *driver)
1491 {
1492 struct ci13xxx *ci = container_of(gadget, struct ci13xxx, gadget);
1493 unsigned long flags;
1494 int retval = -ENOMEM;
1495
1496 if (driver->disconnect == NULL)
1497 return -EINVAL;
1498
1499
1500 ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
1501 retval = usb_ep_enable(&ci->ep0out->ep);
1502 if (retval)
1503 return retval;
1504
1505 ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
1506 retval = usb_ep_enable(&ci->ep0in->ep);
1507 if (retval)
1508 return retval;
1509 spin_lock_irqsave(&ci->lock, flags);
1510
1511 ci->driver = driver;
1512 pm_runtime_get_sync(&ci->gadget.dev);
1513 if (ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS) {
1514 if (ci->vbus_active) {
1515 if (ci->platdata->flags & CI13XXX_REGS_SHARED)
1516 hw_device_reset(ci, USBMODE_CM_DC);
1517 } else {
1518 pm_runtime_put_sync(&ci->gadget.dev);
1519 goto done;
1520 }
1521 }
1522
1523 retval = hw_device_state(ci, ci->ep0out->qh.dma);
1524 if (retval)
1525 pm_runtime_put_sync(&ci->gadget.dev);
1526
1527 done:
1528 spin_unlock_irqrestore(&ci->lock, flags);
1529 return retval;
1530 }
1531
1532 /**
1533 * ci13xxx_stop: unregister a gadget driver
1534 */
1535 static int ci13xxx_stop(struct usb_gadget *gadget,
1536 struct usb_gadget_driver *driver)
1537 {
1538 struct ci13xxx *ci = container_of(gadget, struct ci13xxx, gadget);
1539 unsigned long flags;
1540
1541 spin_lock_irqsave(&ci->lock, flags);
1542
1543 if (!(ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS) ||
1544 ci->vbus_active) {
1545 hw_device_state(ci, 0);
1546 if (ci->platdata->notify_event)
1547 ci->platdata->notify_event(ci,
1548 CI13XXX_CONTROLLER_STOPPED_EVENT);
1549 ci->driver = NULL;
1550 spin_unlock_irqrestore(&ci->lock, flags);
1551 _gadget_stop_activity(&ci->gadget);
1552 spin_lock_irqsave(&ci->lock, flags);
1553 pm_runtime_put(&ci->gadget.dev);
1554 }
1555
1556 spin_unlock_irqrestore(&ci->lock, flags);
1557
1558 return 0;
1559 }
1560
1561 /******************************************************************************
1562 * BUS block
1563 *****************************************************************************/
1564 /**
1565 * udc_irq: ci interrupt handler
1566 *
1567 * This function returns IRQ_HANDLED if the IRQ has been handled
1568 * It locks access to registers
1569 */
1570 static irqreturn_t udc_irq(struct ci13xxx *ci)
1571 {
1572 irqreturn_t retval;
1573 u32 intr;
1574
1575 if (ci == NULL)
1576 return IRQ_HANDLED;
1577
1578 spin_lock(&ci->lock);
1579
1580 if (ci->platdata->flags & CI13XXX_REGS_SHARED) {
1581 if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
1582 USBMODE_CM_DC) {
1583 spin_unlock(&ci->lock);
1584 return IRQ_NONE;
1585 }
1586 }
1587 intr = hw_test_and_clear_intr_active(ci);
1588
1589 if (intr) {
1590 /* order defines priority - do NOT change it */
1591 if (USBi_URI & intr)
1592 isr_reset_handler(ci);
1593
1594 if (USBi_PCI & intr) {
1595 ci->gadget.speed = hw_port_is_high_speed(ci) ?
1596 USB_SPEED_HIGH : USB_SPEED_FULL;
1597 if (ci->suspended && ci->driver->resume) {
1598 spin_unlock(&ci->lock);
1599 ci->driver->resume(&ci->gadget);
1600 spin_lock(&ci->lock);
1601 ci->suspended = 0;
1602 }
1603 }
1604
1605 if (USBi_UI & intr)
1606 isr_tr_complete_handler(ci);
1607
1608 if (USBi_SLI & intr) {
1609 if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
1610 ci->driver->suspend) {
1611 ci->suspended = 1;
1612 spin_unlock(&ci->lock);
1613 ci->driver->suspend(&ci->gadget);
1614 spin_lock(&ci->lock);
1615 }
1616 }
1617 retval = IRQ_HANDLED;
1618 } else {
1619 retval = IRQ_NONE;
1620 }
1621 spin_unlock(&ci->lock);
1622
1623 return retval;
1624 }
1625
1626 /**
1627 * udc_release: driver release function
1628 * @dev: device
1629 *
1630 * Currently does nothing
1631 */
1632 static void udc_release(struct device *dev)
1633 {
1634 }
1635
1636 /**
1637 * udc_start: initialize gadget role
1638 * @ci: chipidea controller
1639 */
1640 static int udc_start(struct ci13xxx *ci)
1641 {
1642 struct device *dev = ci->dev;
1643 int retval = 0;
1644
1645 spin_lock_init(&ci->lock);
1646
1647 ci->gadget.ops = &usb_gadget_ops;
1648 ci->gadget.speed = USB_SPEED_UNKNOWN;
1649 ci->gadget.max_speed = USB_SPEED_HIGH;
1650 ci->gadget.is_otg = 0;
1651 ci->gadget.name = ci->platdata->name;
1652
1653 INIT_LIST_HEAD(&ci->gadget.ep_list);
1654
1655 dev_set_name(&ci->gadget.dev, "gadget");
1656 ci->gadget.dev.dma_mask = dev->dma_mask;
1657 ci->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
1658 ci->gadget.dev.parent = dev;
1659 ci->gadget.dev.release = udc_release;
1660
1661 /* alloc resources */
1662 ci->qh_pool = dma_pool_create("ci13xxx_qh", dev,
1663 sizeof(struct ci13xxx_qh),
1664 64, CI13XXX_PAGE_SIZE);
1665 if (ci->qh_pool == NULL)
1666 return -ENOMEM;
1667
1668 ci->td_pool = dma_pool_create("ci13xxx_td", dev,
1669 sizeof(struct ci13xxx_td),
1670 64, CI13XXX_PAGE_SIZE);
1671 if (ci->td_pool == NULL) {
1672 retval = -ENOMEM;
1673 goto free_qh_pool;
1674 }
1675
1676 retval = init_eps(ci);
1677 if (retval)
1678 goto free_pools;
1679
1680 ci->gadget.ep0 = &ci->ep0in->ep;
1681
1682 if (ci->global_phy)
1683 ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
1684
1685 if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
1686 if (ci->transceiver == NULL) {
1687 retval = -ENODEV;
1688 goto destroy_eps;
1689 }
1690 }
1691
1692 if (!(ci->platdata->flags & CI13XXX_REGS_SHARED)) {
1693 retval = hw_device_reset(ci, USBMODE_CM_DC);
1694 if (retval)
1695 goto put_transceiver;
1696 }
1697
1698 retval = device_register(&ci->gadget.dev);
1699 if (retval) {
1700 put_device(&ci->gadget.dev);
1701 goto put_transceiver;
1702 }
1703
1704 if (!IS_ERR_OR_NULL(ci->transceiver)) {
1705 retval = otg_set_peripheral(ci->transceiver->otg,
1706 &ci->gadget);
1707 if (retval)
1708 goto unreg_device;
1709 }
1710
1711 retval = usb_add_gadget_udc(dev, &ci->gadget);
1712 if (retval)
1713 goto remove_trans;
1714
1715 pm_runtime_no_callbacks(&ci->gadget.dev);
1716 pm_runtime_enable(&ci->gadget.dev);
1717
1718 return retval;
1719
1720 remove_trans:
1721 if (!IS_ERR_OR_NULL(ci->transceiver)) {
1722 otg_set_peripheral(ci->transceiver->otg, NULL);
1723 if (ci->global_phy)
1724 usb_put_phy(ci->transceiver);
1725 }
1726
1727 dev_err(dev, "error = %i\n", retval);
1728 unreg_device:
1729 device_unregister(&ci->gadget.dev);
1730 put_transceiver:
1731 if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy)
1732 usb_put_phy(ci->transceiver);
1733 destroy_eps:
1734 destroy_eps(ci);
1735 free_pools:
1736 dma_pool_destroy(ci->td_pool);
1737 free_qh_pool:
1738 dma_pool_destroy(ci->qh_pool);
1739 return retval;
1740 }
1741
1742 /**
1743 * udc_stop: parent remove must call this to stop the UDC
1744 *
1745 * No interrupts active, the IRQ has been released
1746 */
1747 static void udc_stop(struct ci13xxx *ci)
1748 {
1749 if (ci == NULL)
1750 return;
1751
1752 usb_del_gadget_udc(&ci->gadget);
1753
1754 destroy_eps(ci);
1755
1756 dma_pool_destroy(ci->td_pool);
1757 dma_pool_destroy(ci->qh_pool);
1758
1759 if (!IS_ERR_OR_NULL(ci->transceiver)) {
1760 otg_set_peripheral(ci->transceiver->otg, NULL);
1761 if (ci->global_phy)
1762 usb_put_phy(ci->transceiver);
1763 }
1764 device_unregister(&ci->gadget.dev);
1765 /* my kobject is dynamic, I swear! */
1766 memset(&ci->gadget, 0, sizeof(ci->gadget));
1767 }
1768
1769 /**
1770 * ci_hdrc_gadget_init - initialize device related bits
1771 * @ci: the controller
1772 *
1773 * This function enables the gadget role, if the device is "device capable".
1774 */
1775 int ci_hdrc_gadget_init(struct ci13xxx *ci)
1776 {
1777 struct ci_role_driver *rdrv;
1778
1779 if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
1780 return -ENXIO;
1781
1782 rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
1783 if (!rdrv)
1784 return -ENOMEM;
1785
1786 rdrv->start = udc_start;
1787 rdrv->stop = udc_stop;
1788 rdrv->irq = udc_irq;
1789 rdrv->name = "gadget";
1790 ci->roles[CI_ROLE_GADGET] = rdrv;
1791
1792 return 0;
1793 }