drivers/usb/dwc3/gadget.c
72246da4
FB
1/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
72246da4
FB
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <linux/kernel.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/interrupt.h>
46#include <linux/io.h>
47#include <linux/list.h>
48#include <linux/dma-mapping.h>
49
50#include <linux/usb/ch9.h>
51#include <linux/usb/gadget.h>
52
53#include "core.h"
54#include "gadget.h"
55#include "io.h"
56
04a9bfcd
FB
57/**
58 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
59 * @dwc: pointer to our context structure
60 * @mode: the mode to set (J, K, SE0 NAK, Test Packet, Force Enable)
61 *
62 * Caller should take care of locking. This function will
63 * return 0 on success or -EINVAL if wrong Test Selector
64 * is passed
65 */
66int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
67{
68 u32 reg;
69
70 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
71 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
72
73 switch (mode) {
74 case TEST_J:
75 case TEST_K:
76 case TEST_SE0_NAK:
77 case TEST_PACKET:
78 case TEST_FORCE_EN:
79 reg |= mode << 1;
80 break;
81 default:
82 return -EINVAL;
83 }
84
85 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
86
87 return 0;
88}
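/*
 * Illustrative usage sketch (not part of this file): callers are
 * expected to hold dwc->lock around this helper, e.g.
 *
 *	spin_lock_irqsave(&dwc->lock, flags);
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_J);
 *	spin_unlock_irqrestore(&dwc->lock, flags);
 */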
89
8598bde7
FB
90/**
91 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
92 * @dwc: pointer to our context structure
93 * @state: the state to put link into
94 *
95 * Caller should take care of locking. This function will
aee63e3c 96 * return 0 on success or -ETIMEDOUT.
8598bde7
FB
97 */
98int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
99{
aee63e3c 100 int retries = 10000;
8598bde7
FB
101 u32 reg;
102
802fde98
PZ
103 /*
104 * Wait until device controller is ready. Only applies to 1.94a and
105 * later RTL.
106 */
107 if (dwc->revision >= DWC3_REVISION_194A) {
108 while (--retries) {
109 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
110 if (reg & DWC3_DSTS_DCNRD)
111 udelay(5);
112 else
113 break;
114 }
115
116 if (retries <= 0)
117 return -ETIMEDOUT;
118 }
119
8598bde7
FB
120 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
121 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
122
123 /* set requested state */
124 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
125 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
126
802fde98
PZ
127 /*
128 * The following code is racy when called from dwc3_gadget_wakeup,
129 * and is not needed, at least on newer versions
130 */
131 if (dwc->revision >= DWC3_REVISION_194A)
132 return 0;
133
8598bde7 134 /* wait for a change in DSTS */
aed430e5 135 retries = 10000;
8598bde7
FB
136 while (--retries) {
137 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
138
8598bde7
FB
139 if (DWC3_DSTS_USBLNKST(reg) == state)
140 return 0;
141
aee63e3c 142 udelay(5);
8598bde7
FB
143 }
144
145 dev_vdbg(dwc->dev, "link state change request timed out\n");
146
147 return -ETIMEDOUT;
148}
149
457e84b6
FB
150/**
151 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
152 * @dwc: pointer to our context structure
153 *
154 * This function will do a best-effort FIFO allocation in order
155 * to improve FIFO usage and throughput, while still allowing
156 * us to enable as many endpoints as possible.
157 *
158 * Keep in mind that this operation will be highly dependent
159 * on the configured size for RAM1 - which contains TxFifo -,
160 * the number of endpoints enabled in the coreConsultant tool, and
161 * the width of the Master Bus.
162 *
163 * In the ideal world, we would always be able to satisfy the
164 * following equation:
165 *
166 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
167 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
168 *
169 * Unfortunately, due to many variables that's not always the case.
170 */
171int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
172{
173 int last_fifo_depth = 0;
174 int ram1_depth;
175 int fifo_size;
176 int mdwidth;
177 int num;
178
179 if (!dwc->needs_fifo_resize)
180 return 0;
181
182 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
183 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
184
185 /* MDWIDTH is represented in bits, we need it in bytes */
186 mdwidth >>= 3;
187
188 /*
189 * FIXME For now we will only allocate 1 wMaxPacketSize space
190 * for each enabled endpoint, later patches will come to
191 * improve this algorithm so that we better use the internal
192 * FIFO space
193 */
194 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
195 struct dwc3_ep *dep = dwc->eps[num];
196 int fifo_number = dep->number >> 1;
2e81c36a 197 int mult = 1;
457e84b6
FB
198 int tmp;
199
200 if (!(dep->number & 1))
201 continue;
202
203 if (!(dep->flags & DWC3_EP_ENABLED))
204 continue;
205
16e78db7
IS
206 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
207 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
2e81c36a
FB
208 mult = 3;
209
210 /*
211 * REVISIT: the following assumes we will always have enough
212 * space available on the FIFO RAM for all possible use cases.
213 * Make sure that's true somehow and change FIFO allocation
214 * accordingly.
215 *
216 * If we have Bulk or Isochronous endpoints, we want
217 * them to be able to be very, very fast. So we're giving
218 * those endpoints a fifo_size which is enough for 3 full
219 * packets
220 */
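	/*
	 * Worked example with illustrative numbers (not taken from any
	 * databook): on a 64-bit master bus mdwidth is 8 bytes, so an
	 * enabled bulk endpoint with a 512 byte wMaxPacketSize gets
	 * mult = 3, tmp = 3 * (512 + 8) + 8 = 1568 bytes and
	 * fifo_size = DIV_ROUND_UP(1568, 8) = 196 mdwidth words, which
	 * is what ends up in the low 16 bits of GTXFIFOSIZ below.
	 */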
221 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
457e84b6
FB
222 tmp += mdwidth;
223
224 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
2e81c36a 225
457e84b6
FB
226 fifo_size |= (last_fifo_depth << 16);
227
228 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
229 dep->name, last_fifo_depth, fifo_size & 0xffff);
230
231 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
232 fifo_size);
233
234 last_fifo_depth += (fifo_size & 0xffff);
235 }
236
237 return 0;
238}
239
72246da4
FB
240void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
241 int status)
242{
243 struct dwc3 *dwc = dep->dwc;
e5ba5ec8 244 int i;
72246da4
FB
245
246 if (req->queued) {
e5ba5ec8
PA
247 i = 0;
248 do {
eeb720fb 249 dep->busy_slot++;
e5ba5ec8
PA
250 /*
251 * Skip LINK TRB. We can't use req->trb and check for
252 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
253 * just completed (not the LINK TRB).
254 */
255 if (((dep->busy_slot & DWC3_TRB_MASK) ==
 256 DWC3_TRB_NUM - 1) &&
16e78db7 257 usb_endpoint_xfer_isoc(dep->endpoint.desc))
e5ba5ec8
PA
258 dep->busy_slot++;
 259 } while (++i < req->request.num_mapped_sgs);
c9fda7d6 260 req->queued = false;
72246da4
FB
261 }
262 list_del(&req->list);
eeb720fb 263 req->trb = NULL;
72246da4
FB
264
265 if (req->request.status == -EINPROGRESS)
266 req->request.status = status;
267
0416e494
PA
268 if (dwc->ep0_bounced && dep->number == 0)
269 dwc->ep0_bounced = false;
270 else
271 usb_gadget_unmap_request(&dwc->gadget, &req->request,
272 req->direction);
72246da4
FB
273
274 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
275 req, dep->name, req->request.actual,
276 req->request.length, status);
277
278 spin_unlock(&dwc->lock);
0fc9a1be 279 req->request.complete(&dep->endpoint, &req->request);
72246da4
FB
280 spin_lock(&dwc->lock);
281}
282
283static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
284{
285 switch (cmd) {
286 case DWC3_DEPCMD_DEPSTARTCFG:
287 return "Start New Configuration";
288 case DWC3_DEPCMD_ENDTRANSFER:
289 return "End Transfer";
290 case DWC3_DEPCMD_UPDATETRANSFER:
291 return "Update Transfer";
292 case DWC3_DEPCMD_STARTTRANSFER:
293 return "Start Transfer";
294 case DWC3_DEPCMD_CLEARSTALL:
295 return "Clear Stall";
296 case DWC3_DEPCMD_SETSTALL:
297 return "Set Stall";
802fde98
PZ
298 case DWC3_DEPCMD_GETEPSTATE:
299 return "Get Endpoint State";
72246da4
FB
300 case DWC3_DEPCMD_SETTRANSFRESOURCE:
301 return "Set Endpoint Transfer Resource";
302 case DWC3_DEPCMD_SETEPCONFIG:
303 return "Set Endpoint Configuration";
304 default:
305 return "UNKNOWN command";
306 }
307}
308
b09bb642
FB
309int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
310{
311 u32 timeout = 500;
312 u32 reg;
313
314 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
315 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
316
317 do {
318 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
319 if (!(reg & DWC3_DGCMD_CMDACT)) {
320 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
321 DWC3_DGCMD_STATUS(reg));
322 return 0;
323 }
324
325 /*
326 * We can't sleep here, because it's also called from
327 * interrupt context.
328 */
329 timeout--;
330 if (!timeout)
331 return -ETIMEDOUT;
332 udelay(1);
333 } while (1);
334}
335
72246da4
FB
336int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
337 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
338{
339 struct dwc3_ep *dep = dwc->eps[ep];
61d58242 340 u32 timeout = 500;
72246da4
FB
341 u32 reg;
342
343 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
344 dep->name,
dc1c70a7
FB
345 dwc3_gadget_ep_cmd_string(cmd), params->param0,
346 params->param1, params->param2);
72246da4 347
dc1c70a7
FB
348 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
349 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
350 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
72246da4
FB
351
352 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
353 do {
354 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
355 if (!(reg & DWC3_DEPCMD_CMDACT)) {
164f6e14
FB
356 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
357 DWC3_DEPCMD_STATUS(reg));
72246da4
FB
358 return 0;
359 }
360
361 /*
72246da4
FB
362 * We can't sleep here, because it is also called from
363 * interrupt context.
364 */
365 timeout--;
366 if (!timeout)
367 return -ETIMEDOUT;
368
61d58242 369 udelay(1);
72246da4
FB
370 } while (1);
371}
372
373static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
f6bafc6a 374 struct dwc3_trb *trb)
72246da4 375{
c439ef87 376 u32 offset = (char *) trb - (char *) dep->trb_pool;
72246da4
FB
377
378 return dep->trb_pool_dma + offset;
379}
380
381static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
382{
383 struct dwc3 *dwc = dep->dwc;
384
385 if (dep->trb_pool)
386 return 0;
387
388 if (dep->number == 0 || dep->number == 1)
389 return 0;
390
391 dep->trb_pool = dma_alloc_coherent(dwc->dev,
392 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
393 &dep->trb_pool_dma, GFP_KERNEL);
394 if (!dep->trb_pool) {
395 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
396 dep->name);
397 return -ENOMEM;
398 }
399
400 return 0;
401}
402
403static void dwc3_free_trb_pool(struct dwc3_ep *dep)
404{
405 struct dwc3 *dwc = dep->dwc;
406
407 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
408 dep->trb_pool, dep->trb_pool_dma);
409
410 dep->trb_pool = NULL;
411 dep->trb_pool_dma = 0;
412}
413
414static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
415{
416 struct dwc3_gadget_ep_cmd_params params;
417 u32 cmd;
418
419 memset(&params, 0x00, sizeof(params));
420
421 if (dep->number != 1) {
422 cmd = DWC3_DEPCMD_DEPSTARTCFG;
423 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
b23c8439
PZ
424 if (dep->number > 1) {
425 if (dwc->start_config_issued)
426 return 0;
427 dwc->start_config_issued = true;
72246da4 428 cmd |= DWC3_DEPCMD_PARAM(2);
b23c8439 429 }
72246da4
FB
430
431 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
432 }
433
434 return 0;
435}
436
437static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
c90bfaec 438 const struct usb_endpoint_descriptor *desc,
4b345c9a
FB
439 const struct usb_ss_ep_comp_descriptor *comp_desc,
440 bool ignore)
72246da4
FB
441{
442 struct dwc3_gadget_ep_cmd_params params;
443
444 memset(&params, 0x00, sizeof(params));
445
dc1c70a7 446 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
d2e9a13a
CP
447 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
448
449 /* Burst size is only needed in SuperSpeed mode */
450 if (dwc->gadget.speed == USB_SPEED_SUPER) {
451 u32 burst = dep->endpoint.maxburst - 1;
452
453 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
454 }
72246da4 455
4b345c9a
FB
456 if (ignore)
457 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
458
dc1c70a7
FB
459 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
460 | DWC3_DEPCFG_XFER_NOT_READY_EN;
72246da4 461
18b7ede5 462 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
dc1c70a7
FB
463 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
464 | DWC3_DEPCFG_STREAM_EVENT_EN;
879631aa
FB
465 dep->stream_capable = true;
466 }
467
72246da4 468 if (usb_endpoint_xfer_isoc(desc))
dc1c70a7 469 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
72246da4
FB
470
471 /*
472 * We are doing 1:1 mapping for endpoints, meaning
473 * Physical Endpoints 2 maps to Logical Endpoint 2 and
474 * so on. We consider the direction bit as part of the physical
475 * endpoint number. So USB endpoint 0x81 is 0x03.
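 * In other words (illustrative): physical = (usb endpoint number << 1)
 * | direction, so 0x81 (EP1 IN) becomes (1 << 1) | 1 = 3 and 0x02
 * (EP2 OUT) becomes (2 << 1) | 0 = 4.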
476 */
dc1c70a7 477 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
72246da4
FB
478
479 /*
480 * We must use the lower 16 TX FIFOs even though
481 * HW might have more
482 */
483 if (dep->direction)
dc1c70a7 484 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
72246da4
FB
485
486 if (desc->bInterval) {
dc1c70a7 487 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
72246da4
FB
488 dep->interval = 1 << (desc->bInterval - 1);
489 }
490
491 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
492 DWC3_DEPCMD_SETEPCONFIG, &params);
493}
494
495static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
496{
497 struct dwc3_gadget_ep_cmd_params params;
498
499 memset(&params, 0x00, sizeof(params));
500
dc1c70a7 501 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
72246da4
FB
502
503 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
504 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
505}
506
507/**
508 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
509 * @dep: endpoint to be initialized
510 * @desc: USB Endpoint Descriptor
511 *
512 * Caller should take care of locking
513 */
514static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
c90bfaec 515 const struct usb_endpoint_descriptor *desc,
4b345c9a
FB
516 const struct usb_ss_ep_comp_descriptor *comp_desc,
517 bool ignore)
72246da4
FB
518{
519 struct dwc3 *dwc = dep->dwc;
520 u32 reg;
521 int ret = -ENOMEM;
522
523 if (!(dep->flags & DWC3_EP_ENABLED)) {
524 ret = dwc3_gadget_start_config(dwc, dep);
525 if (ret)
526 return ret;
527 }
528
4b345c9a 529 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
72246da4
FB
530 if (ret)
531 return ret;
532
533 if (!(dep->flags & DWC3_EP_ENABLED)) {
f6bafc6a
FB
534 struct dwc3_trb *trb_st_hw;
535 struct dwc3_trb *trb_link;
72246da4
FB
536
537 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
538 if (ret)
539 return ret;
540
16e78db7 541 dep->endpoint.desc = desc;
c90bfaec 542 dep->comp_desc = comp_desc;
72246da4
FB
543 dep->type = usb_endpoint_type(desc);
544 dep->flags |= DWC3_EP_ENABLED;
545
546 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
547 reg |= DWC3_DALEPENA_EP(dep->number);
548 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
549
550 if (!usb_endpoint_xfer_isoc(desc))
551 return 0;
552
553 memset(&trb_link, 0, sizeof(trb_link));
554
1d046793 555 /* Link TRB for ISOC. The HWO bit is never reset */
72246da4
FB
556 trb_st_hw = &dep->trb_pool[0];
557
f6bafc6a 558 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
72246da4 559
f6bafc6a
FB
560 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
561 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
562 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
563 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
72246da4
FB
564 }
565
566 return 0;
567}
568
624407f9
SAS
569static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
570static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
72246da4
FB
571{
572 struct dwc3_request *req;
573
ea53b882 574 if (!list_empty(&dep->req_queued)) {
624407f9
SAS
575 dwc3_stop_active_transfer(dwc, dep->number);
576
57911504 577 /* giveback all requests to gadget driver */
1591633e
PA
578 while (!list_empty(&dep->req_queued)) {
579 req = next_request(&dep->req_queued);
580
581 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
582 }
ea53b882
FB
583 }
584
72246da4
FB
585 while (!list_empty(&dep->request_list)) {
586 req = next_request(&dep->request_list);
587
624407f9 588 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
72246da4 589 }
72246da4
FB
590}
591
592/**
593 * __dwc3_gadget_ep_disable - Disables a HW endpoint
594 * @dep: the endpoint to disable
595 *
624407f9
SAS
 596 * This function also removes requests which are currently processed by the
597 * hardware and those which are not yet scheduled.
598 * Caller should take care of locking.
72246da4 599 */
72246da4
FB
600static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
601{
602 struct dwc3 *dwc = dep->dwc;
603 u32 reg;
604
624407f9 605 dwc3_remove_requests(dwc, dep);
72246da4
FB
606
607 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
608 reg &= ~DWC3_DALEPENA_EP(dep->number);
609 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
610
879631aa 611 dep->stream_capable = false;
f9c56cdd 612 dep->endpoint.desc = NULL;
c90bfaec 613 dep->comp_desc = NULL;
72246da4 614 dep->type = 0;
879631aa 615 dep->flags = 0;
72246da4
FB
616
617 return 0;
618}
619
620/* -------------------------------------------------------------------------- */
621
622static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
623 const struct usb_endpoint_descriptor *desc)
624{
625 return -EINVAL;
626}
627
628static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
629{
630 return -EINVAL;
631}
632
633/* -------------------------------------------------------------------------- */
634
635static int dwc3_gadget_ep_enable(struct usb_ep *ep,
636 const struct usb_endpoint_descriptor *desc)
637{
638 struct dwc3_ep *dep;
639 struct dwc3 *dwc;
640 unsigned long flags;
641 int ret;
642
643 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
644 pr_debug("dwc3: invalid parameters\n");
645 return -EINVAL;
646 }
647
648 if (!desc->wMaxPacketSize) {
649 pr_debug("dwc3: missing wMaxPacketSize\n");
650 return -EINVAL;
651 }
652
653 dep = to_dwc3_ep(ep);
654 dwc = dep->dwc;
655
c6f83f38
FB
656 if (dep->flags & DWC3_EP_ENABLED) {
657 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
658 dep->name);
659 return 0;
660 }
661
72246da4
FB
662 switch (usb_endpoint_type(desc)) {
663 case USB_ENDPOINT_XFER_CONTROL:
27a78d6a 664 strlcat(dep->name, "-control", sizeof(dep->name));
72246da4
FB
665 break;
666 case USB_ENDPOINT_XFER_ISOC:
27a78d6a 667 strlcat(dep->name, "-isoc", sizeof(dep->name));
72246da4
FB
668 break;
669 case USB_ENDPOINT_XFER_BULK:
27a78d6a 670 strlcat(dep->name, "-bulk", sizeof(dep->name));
72246da4
FB
671 break;
672 case USB_ENDPOINT_XFER_INT:
27a78d6a 673 strlcat(dep->name, "-int", sizeof(dep->name));
72246da4
FB
674 break;
675 default:
676 dev_err(dwc->dev, "invalid endpoint transfer type\n");
677 }
678
72246da4
FB
679 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
680
681 spin_lock_irqsave(&dwc->lock, flags);
4b345c9a 682 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
72246da4
FB
683 spin_unlock_irqrestore(&dwc->lock, flags);
684
685 return ret;
686}
687
688static int dwc3_gadget_ep_disable(struct usb_ep *ep)
689{
690 struct dwc3_ep *dep;
691 struct dwc3 *dwc;
692 unsigned long flags;
693 int ret;
694
695 if (!ep) {
696 pr_debug("dwc3: invalid parameters\n");
697 return -EINVAL;
698 }
699
700 dep = to_dwc3_ep(ep);
701 dwc = dep->dwc;
702
703 if (!(dep->flags & DWC3_EP_ENABLED)) {
704 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
705 dep->name);
706 return 0;
707 }
708
709 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
710 dep->number >> 1,
711 (dep->number & 1) ? "in" : "out");
712
713 spin_lock_irqsave(&dwc->lock, flags);
714 ret = __dwc3_gadget_ep_disable(dep);
715 spin_unlock_irqrestore(&dwc->lock, flags);
716
717 return ret;
718}
719
720static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
721 gfp_t gfp_flags)
722{
723 struct dwc3_request *req;
724 struct dwc3_ep *dep = to_dwc3_ep(ep);
725 struct dwc3 *dwc = dep->dwc;
726
727 req = kzalloc(sizeof(*req), gfp_flags);
728 if (!req) {
729 dev_err(dwc->dev, "not enough memory\n");
730 return NULL;
731 }
732
733 req->epnum = dep->number;
734 req->dep = dep;
72246da4
FB
735
736 return &req->request;
737}
738
739static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
740 struct usb_request *request)
741{
742 struct dwc3_request *req = to_dwc3_request(request);
743
744 kfree(req);
745}
746
c71fc37c
FB
747/**
748 * dwc3_prepare_one_trb - setup one TRB from one request
749 * @dep: endpoint for which this request is prepared
750 * @req: dwc3_request pointer
751 */
68e823e2 752static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
eeb720fb 753 struct dwc3_request *req, dma_addr_t dma,
e5ba5ec8 754 unsigned length, unsigned last, unsigned chain, unsigned node)
c71fc37c 755{
eeb720fb 756 struct dwc3 *dwc = dep->dwc;
f6bafc6a 757 struct dwc3_trb *trb;
c71fc37c 758
eeb720fb
FB
759 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
760 dep->name, req, (unsigned long long) dma,
761 length, last ? " last" : "",
762 chain ? " chain" : "");
763
c71fc37c 764 /* Skip the LINK-TRB on ISOC */
915e202a 765 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
16e78db7 766 usb_endpoint_xfer_isoc(dep->endpoint.desc))
915e202a
PA
767 dep->free_slot++;
768
769 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
c71fc37c 770
eeb720fb
FB
771 if (!req->trb) {
772 dwc3_gadget_move_request_queued(req);
f6bafc6a
FB
773 req->trb = trb;
774 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
e5ba5ec8 775 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
eeb720fb 776 }
c71fc37c 777
e5ba5ec8
PA
778 dep->free_slot++;
779
f6bafc6a
FB
780 trb->size = DWC3_TRB_SIZE_LENGTH(length);
781 trb->bpl = lower_32_bits(dma);
782 trb->bph = upper_32_bits(dma);
c71fc37c 783
16e78db7 784 switch (usb_endpoint_type(dep->endpoint.desc)) {
c71fc37c 785 case USB_ENDPOINT_XFER_CONTROL:
f6bafc6a 786 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
c71fc37c
FB
787 break;
788
789 case USB_ENDPOINT_XFER_ISOC:
e5ba5ec8
PA
790 if (!node)
791 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
792 else
793 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
c71fc37c 794
e5ba5ec8 795 if (!req->request.no_interrupt && !chain)
f6bafc6a 796 trb->ctrl |= DWC3_TRB_CTRL_IOC;
c71fc37c
FB
797 break;
798
799 case USB_ENDPOINT_XFER_BULK:
800 case USB_ENDPOINT_XFER_INT:
f6bafc6a 801 trb->ctrl = DWC3_TRBCTL_NORMAL;
c71fc37c
FB
802 break;
803 default:
804 /*
805 * This is only possible with faulty memory because we
806 * checked it already :)
807 */
808 BUG();
809 }
810
16e78db7 811 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
f6bafc6a
FB
812 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
813 trb->ctrl |= DWC3_TRB_CTRL_CSP;
e5ba5ec8
PA
814 } else if (last) {
815 trb->ctrl |= DWC3_TRB_CTRL_LST;
f6bafc6a 816 }
c71fc37c 817
e5ba5ec8
PA
818 if (chain)
819 trb->ctrl |= DWC3_TRB_CTRL_CHN;
820
16e78db7 821 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
f6bafc6a 822 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
c71fc37c 823
f6bafc6a 824 trb->ctrl |= DWC3_TRB_CTRL_HWO;
c71fc37c
FB
825}
826
72246da4
FB
827/*
828 * dwc3_prepare_trbs - setup TRBs from requests
829 * @dep: endpoint for which requests are being prepared
830 * @starting: true if the endpoint is idle and no requests are queued.
831 *
1d046793
PZ
832 * The function goes through the requests list and sets up TRBs for the
833 * transfers. The function returns once there are no more TRBs available or
834 * it runs out of requests.
72246da4 835 */
68e823e2 836static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
72246da4 837{
68e823e2 838 struct dwc3_request *req, *n;
72246da4 839 u32 trbs_left;
8d62cd65 840 u32 max;
c71fc37c 841 unsigned int last_one = 0;
72246da4
FB
842
843 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
844
845 /* the first request must not be queued */
846 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
c71fc37c 847
8d62cd65 848 /* Can't wrap around on a non-isoc EP since there's no link TRB */
16e78db7 849 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
8d62cd65
PZ
850 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
851 if (trbs_left > max)
852 trbs_left = max;
853 }
854
72246da4 855 /*
1d046793
PZ
 856 * If busy and free slots are equal, then it is either full or empty. If we are
857 * starting to process requests then we are empty. Otherwise we are
72246da4
FB
858 * full and don't do anything
859 */
860 if (!trbs_left) {
861 if (!starting)
68e823e2 862 return;
72246da4
FB
863 trbs_left = DWC3_TRB_NUM;
864 /*
865 * In case we start from scratch, we queue the ISOC requests
866 * starting from slot 1. This is done because we use ring
867 * buffer and have no LST bit to stop us. Instead, we place
1d046793 868 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
72246da4
FB
869 * after the first request so we start at slot 1 and have
870 * 7 requests proceed before we hit the first IOC.
871 * Other transfer types don't use the ring buffer and are
872 * processed from the first TRB until the last one. Since we
873 * don't wrap around we have to start at the beginning.
874 */
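		/*
		 * Concretely (a sketch assuming DWC3_TRB_NUM is 32): an
		 * isochronous endpoint then has transfer TRBs in slots
		 * 0..30, slot 31 holds the link TRB wrapping back to
		 * slot 0, and starting at slot 1 with an IOC roughly
		 * every TRB_NUM/4 slots means the first interrupt fires
		 * after about 7 queued requests, as described above.
		 */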
16e78db7 875 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
876 dep->busy_slot = 1;
877 dep->free_slot = 1;
878 } else {
879 dep->busy_slot = 0;
880 dep->free_slot = 0;
881 }
882 }
883
884 /* The last TRB is a link TRB, not used for xfer */
16e78db7 885 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
68e823e2 886 return;
72246da4
FB
887
888 list_for_each_entry_safe(req, n, &dep->request_list, list) {
eeb720fb
FB
889 unsigned length;
890 dma_addr_t dma;
e5ba5ec8 891 last_one = false;
72246da4 892
eeb720fb
FB
893 if (req->request.num_mapped_sgs > 0) {
894 struct usb_request *request = &req->request;
895 struct scatterlist *sg = request->sg;
896 struct scatterlist *s;
897 int i;
72246da4 898
eeb720fb
FB
899 for_each_sg(sg, s, request->num_mapped_sgs, i) {
900 unsigned chain = true;
72246da4 901
eeb720fb
FB
902 length = sg_dma_len(s);
903 dma = sg_dma_address(s);
72246da4 904
1d046793
PZ
905 if (i == (request->num_mapped_sgs - 1) ||
906 sg_is_last(s)) {
e5ba5ec8
PA
907 if (list_is_last(&req->list,
908 &dep->request_list))
909 last_one = true;
eeb720fb
FB
910 chain = false;
911 }
72246da4 912
eeb720fb
FB
913 trbs_left--;
914 if (!trbs_left)
915 last_one = true;
72246da4 916
eeb720fb
FB
917 if (last_one)
918 chain = false;
72246da4 919
eeb720fb 920 dwc3_prepare_one_trb(dep, req, dma, length,
e5ba5ec8 921 last_one, chain, i);
72246da4 922
eeb720fb
FB
923 if (last_one)
924 break;
925 }
72246da4 926 } else {
eeb720fb
FB
927 dma = req->request.dma;
928 length = req->request.length;
929 trbs_left--;
72246da4 930
eeb720fb
FB
931 if (!trbs_left)
932 last_one = 1;
879631aa 933
eeb720fb
FB
934 /* Is this the last request? */
935 if (list_is_last(&req->list, &dep->request_list))
936 last_one = 1;
72246da4 937
eeb720fb 938 dwc3_prepare_one_trb(dep, req, dma, length,
e5ba5ec8 939 last_one, false, 0);
72246da4 940
eeb720fb
FB
941 if (last_one)
942 break;
72246da4 943 }
72246da4 944 }
72246da4
FB
945}
946
947static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
948 int start_new)
949{
950 struct dwc3_gadget_ep_cmd_params params;
951 struct dwc3_request *req;
952 struct dwc3 *dwc = dep->dwc;
953 int ret;
954 u32 cmd;
955
956 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
957 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
958 return -EBUSY;
959 }
960 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
961
962 /*
963 * If we are getting here after a short-out-packet we don't enqueue any
964 * new requests as we try to set the IOC bit only on the last request.
965 */
966 if (start_new) {
967 if (list_empty(&dep->req_queued))
968 dwc3_prepare_trbs(dep, start_new);
969
970 /* req points to the first request which will be sent */
971 req = next_request(&dep->req_queued);
972 } else {
68e823e2
FB
973 dwc3_prepare_trbs(dep, start_new);
974
72246da4 975 /*
1d046793 976 * req points to the first request where HWO changed from 0 to 1
72246da4 977 */
68e823e2 978 req = next_request(&dep->req_queued);
72246da4
FB
979 }
980 if (!req) {
981 dep->flags |= DWC3_EP_PENDING_REQUEST;
982 return 0;
983 }
984
985 memset(&params, 0, sizeof(params));
72246da4 986
1877d6c9
PA
987 if (start_new) {
988 params.param0 = upper_32_bits(req->trb_dma);
989 params.param1 = lower_32_bits(req->trb_dma);
72246da4 990 cmd = DWC3_DEPCMD_STARTTRANSFER;
1877d6c9 991 } else {
72246da4 992 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1877d6c9 993 }
72246da4
FB
994
995 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
996 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
997 if (ret < 0) {
998 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
999
1000 /*
1001 * FIXME we need to iterate over the list of requests
1002 * here and stop, unmap, free and del each of the linked
1d046793 1003 * requests instead of what we do now.
72246da4 1004 */
0fc9a1be
FB
1005 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1006 req->direction);
72246da4
FB
1007 list_del(&req->list);
1008 return ret;
1009 }
1010
1011 dep->flags |= DWC3_EP_BUSY;
25b8ff68 1012
f898ae09 1013 if (start_new) {
b4996a86 1014 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
f898ae09 1015 dep->number);
b4996a86 1016 WARN_ON_ONCE(!dep->resource_index);
f898ae09 1017 }
25b8ff68 1018
72246da4
FB
1019 return 0;
1020}
1021
d6d6ec7b
PA
1022static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1023 struct dwc3_ep *dep, u32 cur_uf)
1024{
1025 u32 uf;
1026
1027 if (list_empty(&dep->request_list)) {
1028 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1029 dep->name);
f4a53c55 1030 dep->flags |= DWC3_EP_PENDING_REQUEST;
d6d6ec7b
PA
1031 return;
1032 }
1033
 1034 /* schedule the transfer four intervals into the future */
1035 uf = cur_uf + dep->interval * 4;
1036
1037 __dwc3_gadget_kick_transfer(dep, uf, 1);
1038}
1039
1040static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1041 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1042{
1043 u32 cur_uf, mask;
1044
1045 mask = ~(dep->interval - 1);
1046 cur_uf = event->parameters & mask;
1047
1048 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1049}
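/*
 * Numerical sketch (made-up values): if dep->interval is 8 microframes
 * and event->parameters reports microframe 0x1235, then mask = ~7,
 * cur_uf = 0x1235 & ~7 = 0x1230, and the transfer above is scheduled
 * for uf = 0x1230 + 8 * 4 = 0x1250, i.e. four intervals in the future.
 */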
1050
72246da4
FB
1051static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1052{
0fc9a1be
FB
1053 struct dwc3 *dwc = dep->dwc;
1054 int ret;
1055
72246da4
FB
1056 req->request.actual = 0;
1057 req->request.status = -EINPROGRESS;
1058 req->direction = dep->direction;
1059 req->epnum = dep->number;
1060
1061 /*
1062 * We only add to our list of requests now and
1063 * start consuming the list once we get XferNotReady
1064 * IRQ.
1065 *
1066 * That way, we avoid doing anything that we don't need
1067 * to do now and defer it until the point we receive a
1068 * particular token from the Host side.
1069 *
1070 * This will also avoid Host cancelling URBs due to too
1d046793 1071 * many NAKs.
72246da4 1072 */
0fc9a1be
FB
1073 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1074 dep->direction);
1075 if (ret)
1076 return ret;
1077
72246da4
FB
1078 list_add_tail(&req->list, &dep->request_list);
1079
1080 /*
b511e5e7 1081 * There are a few special cases:
72246da4 1082 *
f898ae09
PZ
1083 * 1. XferNotReady with empty list of requests. We need to kick the
1084 * transfer here in that situation, otherwise we will be NAKing
1085 * forever. If we get XferNotReady before gadget driver has a
1086 * chance to queue a request, we will ACK the IRQ but won't be
1087 * able to receive the data until the next request is queued.
1088 * The following code is handling exactly that.
72246da4 1089 *
72246da4
FB
1090 */
1091 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
f4a53c55
PA
1092 /*
1093 * If xfernotready is already elapsed and it is a case
1094 * of isoc transfer, then issue END TRANSFER, so that
1095 * you can receive xfernotready again and can have
1096 * notion of current microframe.
1097 */
1098 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
cdc359dd
PA
1099 if (list_empty(&dep->req_queued)) {
1100 dwc3_stop_active_transfer(dwc, dep->number);
1101 dep->flags = DWC3_EP_ENABLED;
1102 }
f4a53c55
PA
1103 return 0;
1104 }
1105
b511e5e7 1106 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
348e026f 1107 if (ret && ret != -EBUSY)
b511e5e7
FB
1108 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1109 dep->name);
15f86bde 1110 return ret;
b511e5e7 1111 }
72246da4 1112
b511e5e7
FB
1113 /*
1114 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1115 * kick the transfer here after queuing a request, otherwise the
1116 * core may not see the modified TRB(s).
1117 */
1118 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
79c9046e
PA
1119 (dep->flags & DWC3_EP_BUSY) &&
1120 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
b4996a86
FB
1121 WARN_ON_ONCE(!dep->resource_index);
1122 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
b511e5e7 1123 false);
348e026f 1124 if (ret && ret != -EBUSY)
72246da4
FB
1125 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1126 dep->name);
15f86bde 1127 return ret;
a0925324 1128 }
72246da4
FB
1129
1130 return 0;
1131}
1132
1133static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1134 gfp_t gfp_flags)
1135{
1136 struct dwc3_request *req = to_dwc3_request(request);
1137 struct dwc3_ep *dep = to_dwc3_ep(ep);
1138 struct dwc3 *dwc = dep->dwc;
1139
1140 unsigned long flags;
1141
1142 int ret;
1143
16e78db7 1144 if (!dep->endpoint.desc) {
72246da4
FB
1145 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1146 request, ep->name);
1147 return -ESHUTDOWN;
1148 }
1149
1150 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1151 request, ep->name, request->length);
1152
1153 spin_lock_irqsave(&dwc->lock, flags);
1154 ret = __dwc3_gadget_ep_queue(dep, req);
1155 spin_unlock_irqrestore(&dwc->lock, flags);
1156
1157 return ret;
1158}
1159
1160static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1161 struct usb_request *request)
1162{
1163 struct dwc3_request *req = to_dwc3_request(request);
1164 struct dwc3_request *r = NULL;
1165
1166 struct dwc3_ep *dep = to_dwc3_ep(ep);
1167 struct dwc3 *dwc = dep->dwc;
1168
1169 unsigned long flags;
1170 int ret = 0;
1171
1172 spin_lock_irqsave(&dwc->lock, flags);
1173
1174 list_for_each_entry(r, &dep->request_list, list) {
1175 if (r == req)
1176 break;
1177 }
1178
1179 if (r != req) {
1180 list_for_each_entry(r, &dep->req_queued, list) {
1181 if (r == req)
1182 break;
1183 }
1184 if (r == req) {
1185 /* wait until it is processed */
1186 dwc3_stop_active_transfer(dwc, dep->number);
e8d4e8be 1187 goto out1;
72246da4
FB
1188 }
1189 dev_err(dwc->dev, "request %p was not queued to %s\n",
1190 request, ep->name);
1191 ret = -EINVAL;
1192 goto out0;
1193 }
1194
e8d4e8be 1195out1:
72246da4
FB
1196 /* giveback the request */
1197 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1198
1199out0:
1200 spin_unlock_irqrestore(&dwc->lock, flags);
1201
1202 return ret;
1203}
1204
1205int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1206{
1207 struct dwc3_gadget_ep_cmd_params params;
1208 struct dwc3 *dwc = dep->dwc;
1209 int ret;
1210
1211 memset(&params, 0x00, sizeof(params));
1212
1213 if (value) {
72246da4
FB
1214 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1215 DWC3_DEPCMD_SETSTALL, &params);
1216 if (ret)
1217 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1218 value ? "set" : "clear",
1219 dep->name);
1220 else
1221 dep->flags |= DWC3_EP_STALL;
1222 } else {
1223 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1224 DWC3_DEPCMD_CLEARSTALL, &params);
1225 if (ret)
1226 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1227 value ? "set" : "clear",
1228 dep->name);
1229 else
e6303463 1230 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
72246da4 1231 }
5275455a 1232
72246da4
FB
1233 return ret;
1234}
1235
1236static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1237{
1238 struct dwc3_ep *dep = to_dwc3_ep(ep);
1239 struct dwc3 *dwc = dep->dwc;
1240
1241 unsigned long flags;
1242
1243 int ret;
1244
1245 spin_lock_irqsave(&dwc->lock, flags);
1246
16e78db7 1247 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1248 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1249 ret = -EINVAL;
1250 goto out;
1251 }
1252
1253 ret = __dwc3_gadget_ep_set_halt(dep, value);
1254out:
1255 spin_unlock_irqrestore(&dwc->lock, flags);
1256
1257 return ret;
1258}
1259
1260static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1261{
1262 struct dwc3_ep *dep = to_dwc3_ep(ep);
249a4569
PZ
1263 struct dwc3 *dwc = dep->dwc;
1264 unsigned long flags;
72246da4 1265
249a4569 1266 spin_lock_irqsave(&dwc->lock, flags);
72246da4 1267 dep->flags |= DWC3_EP_WEDGE;
249a4569 1268 spin_unlock_irqrestore(&dwc->lock, flags);
72246da4 1269
08f0d966
PA
1270 if (dep->number == 0 || dep->number == 1)
1271 return dwc3_gadget_ep0_set_halt(ep, 1);
1272 else
1273 return dwc3_gadget_ep_set_halt(ep, 1);
72246da4
FB
1274}
1275
1276/* -------------------------------------------------------------------------- */
1277
1278static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1279 .bLength = USB_DT_ENDPOINT_SIZE,
1280 .bDescriptorType = USB_DT_ENDPOINT,
1281 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1282};
1283
1284static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1285 .enable = dwc3_gadget_ep0_enable,
1286 .disable = dwc3_gadget_ep0_disable,
1287 .alloc_request = dwc3_gadget_ep_alloc_request,
1288 .free_request = dwc3_gadget_ep_free_request,
1289 .queue = dwc3_gadget_ep0_queue,
1290 .dequeue = dwc3_gadget_ep_dequeue,
08f0d966 1291 .set_halt = dwc3_gadget_ep0_set_halt,
72246da4
FB
1292 .set_wedge = dwc3_gadget_ep_set_wedge,
1293};
1294
1295static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1296 .enable = dwc3_gadget_ep_enable,
1297 .disable = dwc3_gadget_ep_disable,
1298 .alloc_request = dwc3_gadget_ep_alloc_request,
1299 .free_request = dwc3_gadget_ep_free_request,
1300 .queue = dwc3_gadget_ep_queue,
1301 .dequeue = dwc3_gadget_ep_dequeue,
1302 .set_halt = dwc3_gadget_ep_set_halt,
1303 .set_wedge = dwc3_gadget_ep_set_wedge,
1304};
1305
1306/* -------------------------------------------------------------------------- */
1307
1308static int dwc3_gadget_get_frame(struct usb_gadget *g)
1309{
1310 struct dwc3 *dwc = gadget_to_dwc(g);
1311 u32 reg;
1312
1313 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1314 return DWC3_DSTS_SOFFN(reg);
1315}
1316
1317static int dwc3_gadget_wakeup(struct usb_gadget *g)
1318{
1319 struct dwc3 *dwc = gadget_to_dwc(g);
1320
1321 unsigned long timeout;
1322 unsigned long flags;
1323
1324 u32 reg;
1325
1326 int ret = 0;
1327
1328 u8 link_state;
1329 u8 speed;
1330
1331 spin_lock_irqsave(&dwc->lock, flags);
1332
1333 /*
 1334 * According to the Databook, a remote wakeup request should
 1335 * be issued only when the device is in the early suspend state.
1336 *
1337 * We can check that via USB Link State bits in DSTS register.
1338 */
1339 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1340
1341 speed = reg & DWC3_DSTS_CONNECTSPD;
1342 if (speed == DWC3_DSTS_SUPERSPEED) {
1343 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1344 ret = -EINVAL;
1345 goto out;
1346 }
1347
1348 link_state = DWC3_DSTS_USBLNKST(reg);
1349
1350 switch (link_state) {
1351 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1352 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1353 break;
1354 default:
1355 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1356 link_state);
1357 ret = -EINVAL;
1358 goto out;
1359 }
1360
8598bde7
FB
1361 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1362 if (ret < 0) {
1363 dev_err(dwc->dev, "failed to put link in Recovery\n");
1364 goto out;
1365 }
72246da4 1366
802fde98
PZ
1367 /* Recent versions do this automatically */
1368 if (dwc->revision < DWC3_REVISION_194A) {
1369 /* write zeroes to Link Change Request */
fcc023c7 1370 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
802fde98
PZ
1371 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1372 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1373 }
72246da4 1374
1d046793 1375 /* poll until Link State changes to ON */
72246da4
FB
1376 timeout = jiffies + msecs_to_jiffies(100);
1377
1d046793 1378 while (!time_after(jiffies, timeout)) {
72246da4
FB
1379 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1380
1381 /* in HS, means ON */
1382 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1383 break;
1384 }
1385
1386 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1387 dev_err(dwc->dev, "failed to send remote wakeup\n");
1388 ret = -EINVAL;
1389 }
1390
1391out:
1392 spin_unlock_irqrestore(&dwc->lock, flags);
1393
1394 return ret;
1395}
1396
1397static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1398 int is_selfpowered)
1399{
1400 struct dwc3 *dwc = gadget_to_dwc(g);
249a4569 1401 unsigned long flags;
72246da4 1402
249a4569 1403 spin_lock_irqsave(&dwc->lock, flags);
72246da4 1404 dwc->is_selfpowered = !!is_selfpowered;
249a4569 1405 spin_unlock_irqrestore(&dwc->lock, flags);
72246da4
FB
1406
1407 return 0;
1408}
1409
6f17f74b 1410static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
72246da4
FB
1411{
1412 u32 reg;
61d58242 1413 u32 timeout = 500;
72246da4
FB
1414
1415 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
8db7ed15 1416 if (is_on) {
802fde98
PZ
1417 if (dwc->revision <= DWC3_REVISION_187A) {
1418 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1419 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1420 }
1421
1422 if (dwc->revision >= DWC3_REVISION_194A)
1423 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1424 reg |= DWC3_DCTL_RUN_STOP;
9fcb3bd8 1425 dwc->pullups_connected = true;
8db7ed15 1426 } else {
72246da4 1427 reg &= ~DWC3_DCTL_RUN_STOP;
9fcb3bd8 1428 dwc->pullups_connected = false;
8db7ed15 1429 }
72246da4
FB
1430
1431 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1432
1433 do {
1434 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1435 if (is_on) {
1436 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1437 break;
1438 } else {
1439 if (reg & DWC3_DSTS_DEVCTRLHLT)
1440 break;
1441 }
72246da4
FB
1442 timeout--;
1443 if (!timeout)
6f17f74b 1444 return -ETIMEDOUT;
61d58242 1445 udelay(1);
72246da4
FB
1446 } while (1);
1447
1448 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1449 dwc->gadget_driver
1450 ? dwc->gadget_driver->function : "no-function",
1451 is_on ? "connect" : "disconnect");
6f17f74b
PA
1452
1453 return 0;
72246da4
FB
1454}
1455
1456static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1457{
1458 struct dwc3 *dwc = gadget_to_dwc(g);
1459 unsigned long flags;
6f17f74b 1460 int ret;
72246da4
FB
1461
1462 is_on = !!is_on;
1463
1464 spin_lock_irqsave(&dwc->lock, flags);
6f17f74b 1465 ret = dwc3_gadget_run_stop(dwc, is_on);
72246da4
FB
1466 spin_unlock_irqrestore(&dwc->lock, flags);
1467
6f17f74b 1468 return ret;
72246da4
FB
1469}
1470
8698e2ac
FB
1471static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1472{
1473 u32 reg;
1474
1475 /* Enable all but Start and End of Frame IRQs */
1476 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1477 DWC3_DEVTEN_EVNTOVERFLOWEN |
1478 DWC3_DEVTEN_CMDCMPLTEN |
1479 DWC3_DEVTEN_ERRTICERREN |
1480 DWC3_DEVTEN_WKUPEVTEN |
1481 DWC3_DEVTEN_ULSTCNGEN |
1482 DWC3_DEVTEN_CONNECTDONEEN |
1483 DWC3_DEVTEN_USBRSTEN |
1484 DWC3_DEVTEN_DISCONNEVTEN);
1485
1486 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1487}
1488
1489static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1490{
1491 /* mask all interrupts */
1492 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1493}
1494
1495static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
b15a762f 1496static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
8698e2ac 1497
72246da4
FB
1498static int dwc3_gadget_start(struct usb_gadget *g,
1499 struct usb_gadget_driver *driver)
1500{
1501 struct dwc3 *dwc = gadget_to_dwc(g);
1502 struct dwc3_ep *dep;
1503 unsigned long flags;
1504 int ret = 0;
8698e2ac 1505 int irq;
72246da4
FB
1506 u32 reg;
1507
734b2fe9
FB
1508 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1509 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1510 IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
1511 if (ret) {
1512 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1513 irq, ret);
1514 goto err0;
1515 }
1516
72246da4
FB
1517 spin_lock_irqsave(&dwc->lock, flags);
1518
1519 if (dwc->gadget_driver) {
1520 dev_err(dwc->dev, "%s is already bound to %s\n",
1521 dwc->gadget.name,
1522 dwc->gadget_driver->driver.name);
1523 ret = -EBUSY;
734b2fe9 1524 goto err1;
72246da4
FB
1525 }
1526
1527 dwc->gadget_driver = driver;
72246da4 1528
72246da4
FB
1529 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1530 reg &= ~(DWC3_DCFG_SPEED_MASK);
07e7f47b
FB
1531
1532 /**
1533 * WORKAROUND: DWC3 revision < 2.20a have an issue
1534 * which would cause metastability state on Run/Stop
1535 * bit if we try to force the IP to USB2-only mode.
1536 *
1537 * Because of that, we cannot configure the IP to any
1538 * speed other than the SuperSpeed
1539 *
1540 * Refers to:
1541 *
1542 * STAR#9000525659: Clock Domain Crossing on DCTL in
1543 * USB 2.0 Mode
1544 */
1545 if (dwc->revision < DWC3_REVISION_220A)
1546 reg |= DWC3_DCFG_SUPERSPEED;
1547 else
1548 reg |= dwc->maximum_speed;
72246da4
FB
1549 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1550
b23c8439
PZ
1551 dwc->start_config_issued = false;
1552
72246da4
FB
1553 /* Start with SuperSpeed Default */
1554 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1555
1556 dep = dwc->eps[0];
4b345c9a 1557 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
72246da4
FB
1558 if (ret) {
1559 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
734b2fe9 1560 goto err2;
72246da4
FB
1561 }
1562
1563 dep = dwc->eps[1];
4b345c9a 1564 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
72246da4
FB
1565 if (ret) {
1566 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
734b2fe9 1567 goto err3;
72246da4
FB
1568 }
1569
1570 /* begin to receive SETUP packets */
c7fcdeb2 1571 dwc->ep0state = EP0_SETUP_PHASE;
72246da4
FB
1572 dwc3_ep0_out_start(dwc);
1573
8698e2ac
FB
1574 dwc3_gadget_enable_irq(dwc);
1575
72246da4
FB
1576 spin_unlock_irqrestore(&dwc->lock, flags);
1577
1578 return 0;
1579
734b2fe9 1580err3:
72246da4
FB
1581 __dwc3_gadget_ep_disable(dwc->eps[0]);
1582
734b2fe9 1583err2:
003dda8a 1584 dwc->gadget_driver = NULL;
734b2fe9
FB
1585
1586err1:
72246da4
FB
1587 spin_unlock_irqrestore(&dwc->lock, flags);
1588
734b2fe9
FB
1589 free_irq(irq, dwc);
1590
1591err0:
72246da4
FB
1592 return ret;
1593}
1594
1595static int dwc3_gadget_stop(struct usb_gadget *g,
1596 struct usb_gadget_driver *driver)
1597{
1598 struct dwc3 *dwc = gadget_to_dwc(g);
1599 unsigned long flags;
8698e2ac 1600 int irq;
72246da4
FB
1601
1602 spin_lock_irqsave(&dwc->lock, flags);
1603
8698e2ac 1604 dwc3_gadget_disable_irq(dwc);
72246da4
FB
1605 __dwc3_gadget_ep_disable(dwc->eps[0]);
1606 __dwc3_gadget_ep_disable(dwc->eps[1]);
1607
1608 dwc->gadget_driver = NULL;
72246da4
FB
1609
1610 spin_unlock_irqrestore(&dwc->lock, flags);
1611
734b2fe9
FB
1612 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1613 free_irq(irq, dwc);
1614
72246da4
FB
1615 return 0;
1616}
802fde98 1617
72246da4
FB
1618static const struct usb_gadget_ops dwc3_gadget_ops = {
1619 .get_frame = dwc3_gadget_get_frame,
1620 .wakeup = dwc3_gadget_wakeup,
1621 .set_selfpowered = dwc3_gadget_set_selfpowered,
1622 .pullup = dwc3_gadget_pullup,
1623 .udc_start = dwc3_gadget_start,
1624 .udc_stop = dwc3_gadget_stop,
1625};
1626
1627/* -------------------------------------------------------------------------- */
1628
6a1e3ef4
FB
1629static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1630 u8 num, u32 direction)
72246da4
FB
1631{
1632 struct dwc3_ep *dep;
6a1e3ef4 1633 u8 i;
72246da4 1634
6a1e3ef4
FB
1635 for (i = 0; i < num; i++) {
1636 u8 epnum = (i << 1) | (!!direction);
72246da4 1637
72246da4
FB
1638 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1639 if (!dep) {
1640 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1641 epnum);
1642 return -ENOMEM;
1643 }
1644
1645 dep->dwc = dwc;
1646 dep->number = epnum;
1647 dwc->eps[epnum] = dep;
1648
1649 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1650 (epnum & 1) ? "in" : "out");
6a1e3ef4 1651
72246da4
FB
1652 dep->endpoint.name = dep->name;
1653 dep->direction = (epnum & 1);
1654
1655 if (epnum == 0 || epnum == 1) {
1656 dep->endpoint.maxpacket = 512;
6048e4c6 1657 dep->endpoint.maxburst = 1;
72246da4
FB
1658 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1659 if (!epnum)
1660 dwc->gadget.ep0 = &dep->endpoint;
1661 } else {
1662 int ret;
1663
1664 dep->endpoint.maxpacket = 1024;
12d36c16 1665 dep->endpoint.max_streams = 15;
72246da4
FB
1666 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1667 list_add_tail(&dep->endpoint.ep_list,
1668 &dwc->gadget.ep_list);
1669
1670 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1671 if (ret)
72246da4 1672 return ret;
72246da4 1673 }
25b8ff68 1674
72246da4
FB
1675 INIT_LIST_HEAD(&dep->request_list);
1676 INIT_LIST_HEAD(&dep->req_queued);
1677 }
1678
1679 return 0;
1680}
1681
6a1e3ef4
FB
1682static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1683{
1684 int ret;
1685
1686 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1687
1688 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1689 if (ret < 0) {
1690 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1691 return ret;
1692 }
1693
1694 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1695 if (ret < 0) {
1696 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1697 return ret;
1698 }
1699
1700 return 0;
1701}
1702
72246da4
FB
1703static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1704{
1705 struct dwc3_ep *dep;
1706 u8 epnum;
1707
1708 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1709 dep = dwc->eps[epnum];
6a1e3ef4
FB
1710 if (!dep)
1711 continue;
5bf8fae3
GC
1712 /*
1713 * Physical endpoints 0 and 1 are special; they form the
1714 * bi-directional USB endpoint 0.
1715 *
1716 * For those two physical endpoints, we don't allocate a TRB
 1717 * pool nor do we add them to the endpoints list. Due to that, we
1718 * shouldn't do these two operations otherwise we would end up
1719 * with all sorts of bugs when removing dwc3.ko.
1720 */
1721 if (epnum != 0 && epnum != 1) {
1722 dwc3_free_trb_pool(dep);
72246da4 1723 list_del(&dep->endpoint.ep_list);
5bf8fae3 1724 }
72246da4
FB
1725
1726 kfree(dep);
1727 }
1728}
1729
72246da4 1730/* -------------------------------------------------------------------------- */
e5caff68 1731
e5ba5ec8
PA
1732static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1733 struct dwc3_request *req, struct dwc3_trb *trb,
72246da4
FB
1734 const struct dwc3_event_depevt *event, int status)
1735{
72246da4
FB
1736 unsigned int count;
1737 unsigned int s_pkt = 0;
d6d6ec7b 1738 unsigned int trb_status;
72246da4 1739
e5ba5ec8
PA
1740 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1741 /*
1742 * We continue despite the error. There is not much we
1743 * can do. If we don't clean it up we loop forever. If
1744 * we skip the TRB then it gets overwritten after a
1745 * while since we use them in a ring buffer. A BUG()
 1746 * would help. Let's hope that if this occurs, someone
1747 * fixes the root cause instead of looking away :)
1748 */
1749 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1750 dep->name, trb);
1751 count = trb->size & DWC3_TRB_SIZE_MASK;
1752
1753 if (dep->direction) {
1754 if (count) {
1755 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1756 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1757 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1758 dep->name);
1759 /*
1760 * If missed isoc occurred and there is
1761 * no request queued then issue END
1762 * TRANSFER, so that core generates
1763 * next xfernotready and we will issue
1764 * a fresh START TRANSFER.
1765 * If there are still queued request
1766 * then wait, do not issue either END
1767 * or UPDATE TRANSFER, just attach next
1768 * request in request_list during
 1769 * giveback. If any future queued request
1770 * is successfully transferred then we
1771 * will issue UPDATE TRANSFER for all
1772 * request in the request_list.
1773 */
1774 dep->flags |= DWC3_EP_MISSED_ISOC;
1775 } else {
1776 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1777 dep->name);
1778 status = -ECONNRESET;
1779 }
1780 } else {
1781 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1782 }
1783 } else {
1784 if (count && (event->status & DEPEVT_STATUS_SHORT))
1785 s_pkt = 1;
1786 }
1787
1788 /*
1789 * We assume here we will always receive the entire data block
1790 * which we should receive. Meaning, if we program RX to
1791 * receive 4K but we receive only 2K, we assume that's all we
1792 * should receive and we simply bounce the request back to the
1793 * gadget driver for further processing.
1794 */
1795 req->request.actual += req->request.length - count;
1796 if (s_pkt)
1797 return 1;
1798 if ((event->status & DEPEVT_STATUS_LST) &&
1799 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1800 DWC3_TRB_CTRL_HWO)))
1801 return 1;
1802 if ((event->status & DEPEVT_STATUS_IOC) &&
1803 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1804 return 1;
1805 return 0;
1806}
1807
1808static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1809 const struct dwc3_event_depevt *event, int status)
1810{
1811 struct dwc3_request *req;
1812 struct dwc3_trb *trb;
1813 unsigned int slot;
1814 unsigned int i;
1815 int ret;
1816
72246da4
FB
1817 do {
1818 req = next_request(&dep->req_queued);
d39ee7be
SAS
1819 if (!req) {
1820 WARN_ON_ONCE(1);
1821 return 1;
1822 }
e5ba5ec8
PA
1823 i = 0;
1824 do {
1825 slot = req->start_slot + i;
1826 if ((slot == DWC3_TRB_NUM - 1) &&
1827 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1828 slot++;
1829 slot %= DWC3_TRB_NUM;
1830 trb = &dep->trb_pool[slot];
72246da4 1831
e5ba5ec8
PA
1832 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1833 event, status);
1834 if (ret)
1835 break;
 1836 } while (++i < req->request.num_mapped_sgs);
72246da4 1837
72246da4 1838 dwc3_gadget_giveback(dep, req, status);
e5ba5ec8
PA
1839
1840 if (ret)
72246da4
FB
1841 break;
1842 } while (1);
1843
cdc359dd
PA
1844 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1845 list_empty(&dep->req_queued)) {
1846 if (list_empty(&dep->request_list)) {
1847 /*
1848 * If there is no entry in request list then do
1849 * not issue END TRANSFER now. Just set PENDING
1850 * flag, so that END TRANSFER is issued when an
1851 * entry is added into request list.
1852 */
1853 dep->flags = DWC3_EP_PENDING_REQUEST;
1854 } else {
1855 dwc3_stop_active_transfer(dwc, dep->number);
1856 dep->flags = DWC3_EP_ENABLED;
1857 }
7efea86c
PA
1858 return 1;
1859 }
1860
f6bafc6a
FB
1861 if ((event->status & DEPEVT_STATUS_IOC) &&
1862 (trb->ctrl & DWC3_TRB_CTRL_IOC))
72246da4
FB
1863 return 0;
1864 return 1;
1865}
1866
1867static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1868 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1869 int start_new)
1870{
1871 unsigned status = 0;
1872 int clean_busy;
1873
1874 if (event->status & DEPEVT_STATUS_BUSERR)
1875 status = -ECONNRESET;
1876
1877 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1878 if (clean_busy)
1879 dep->flags &= ~DWC3_EP_BUSY;
1880
1881 /*
1882 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1883 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1884 */
1885 if (dwc->revision < DWC3_REVISION_183A) {
1886 u32 reg;
1887 int i;
1888
1889 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1890 dep = dwc->eps[i];
1891
1892 if (!(dep->flags & DWC3_EP_ENABLED))
1893 continue;
1894
1895 if (!list_empty(&dep->req_queued))
1896 return;
1897 }
1898
1899 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1900 reg |= dwc->u1u2;
1901 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1902
1903 dwc->u1u2 = 0;
1904 }
1905}
1906
1907static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1908 const struct dwc3_event_depevt *event)
1909{
1910 struct dwc3_ep *dep;
1911 u8 epnum = event->endpoint_number;
1912
1913 dep = dwc->eps[epnum];
1914
1915 if (!(dep->flags & DWC3_EP_ENABLED))
1916 return;
1917
1918 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1919 dwc3_ep_event_string(event->endpoint_event));
1920
1921 if (epnum == 0 || epnum == 1) {
1922 dwc3_ep0_interrupt(dwc, event);
1923 return;
1924 }
1925
1926 switch (event->endpoint_event) {
1927 case DWC3_DEPEVT_XFERCOMPLETE:
1928 dep->resource_index = 0;
1929
1930 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1931 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1932 dep->name);
1933 return;
1934 }
1935
1936 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1937 break;
1938 case DWC3_DEPEVT_XFERINPROGRESS:
1939 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1940 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1941 dep->name);
1942 return;
1943 }
1944
1945 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1946 break;
1947 case DWC3_DEPEVT_XFERNOTREADY:
1948 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1949 dwc3_gadget_start_isoc(dwc, dep, event);
1950 } else {
1951 int ret;
1952
1953 dev_vdbg(dwc->dev, "%s: reason %s\n",
1954 dep->name, event->status &
1955 DEPEVT_STATUS_TRANSFER_ACTIVE
1956 ? "Transfer Active"
1957 : "Transfer Not Active");
1958
1959 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1960 if (!ret || ret == -EBUSY)
1961 return;
1962
1963 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1964 dep->name);
1965 }
1966
1967 break;
1968 case DWC3_DEPEVT_STREAMEVT:
1969 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
1970 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1971 dep->name);
1972 return;
1973 }
1974
1975 switch (event->status) {
1976 case DEPEVT_STREAMEVT_FOUND:
1977 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1978 event->parameters);
1979
1980 break;
1981 case DEPEVT_STREAMEVT_NOTFOUND:
1982 /* FALLTHROUGH */
1983 default:
1984 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1985 }
1986 break;
1987 case DWC3_DEPEVT_RXTXFIFOEVT:
1988 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1989 break;
1990 case DWC3_DEPEVT_EPCMDCMPLT:
1991 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
1992 break;
1993 }
1994}
1995
1996static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1997{
1998 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1999 spin_unlock(&dwc->lock);
2000 dwc->gadget_driver->disconnect(&dwc->gadget);
2001 spin_lock(&dwc->lock);
2002 }
2003}
2004
2005static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
2006{
2007 struct dwc3_ep *dep;
2008 struct dwc3_gadget_ep_cmd_params params;
2009 u32 cmd;
2010 int ret;
2011
2012 dep = dwc->eps[epnum];
2013
2014 if (!dep->resource_index)
2015 return;
2016
2017 /*
2018 * NOTICE: We are violating what the Databook says about the
2019 * EndTransfer command. Ideally we would _always_ wait for the
2020 * EndTransfer Command Completion IRQ, but that's causing too
2021 * much trouble synchronizing between us and gadget driver.
2022 *
2023 * We have discussed this with the IP Provider and it was
2024 * suggested to giveback all requests here, but give HW some
2025 * extra time to synchronize with the interconnect. We're using
2026 * an arbitraty 100us delay for that.
2027 * an arbitrary 100us delay for that.
2028 * Note also that a similar handling was tested by Synopsys
2029 * (thanks a lot Paul) and nothing bad has come out of it.
2030 * In short, what we're doing is:
2031 *
2032 * - Issue EndTransfer WITH CMDIOC bit set
2033 * - Wait 100us
2034 */
2035
2036 cmd = DWC3_DEPCMD_ENDTRANSFER;
2037 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
2038 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2039 memset(&params, 0, sizeof(params));
2040 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2041 WARN_ON_ONCE(ret);
2042 dep->resource_index = 0;
2043 dep->flags &= ~DWC3_EP_BUSY;
2044 udelay(100);
2045}
2046
2047static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2048{
2049 u32 epnum;
2050
2051 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2052 struct dwc3_ep *dep;
2053
2054 dep = dwc->eps[epnum];
2055 if (!dep)
2056 continue;
2057
2058 if (!(dep->flags & DWC3_EP_ENABLED))
2059 continue;
2060
2061 dwc3_remove_requests(dwc, dep);
2062 }
2063}
2064
2065static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2066{
2067 u32 epnum;
2068
2069 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2070 struct dwc3_ep *dep;
2071 struct dwc3_gadget_ep_cmd_params params;
2072 int ret;
2073
2074 dep = dwc->eps[epnum];
2075 if (!dep)
2076 continue;
2077
2078 if (!(dep->flags & DWC3_EP_STALL))
2079 continue;
2080
2081 dep->flags &= ~DWC3_EP_STALL;
2082
2083 memset(&params, 0, sizeof(params));
2084 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2085 DWC3_DEPCMD_CLEARSTALL, &params);
2086 WARN_ON_ONCE(ret);
2087 }
2088}
2089
2090static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2091{
2092 int reg;
2093
2094 dev_vdbg(dwc->dev, "%s\n", __func__);
2095
2096 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2097 reg &= ~DWC3_DCTL_INITU1ENA;
2098 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2099
2100 reg &= ~DWC3_DCTL_INITU2ENA;
2101 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2102
2103 dwc3_disconnect_gadget(dwc);
2104 dwc->start_config_issued = false;
2105
2106 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2107 dwc->setup_packet_pending = false;
2108}
2109
2110 static void dwc3_gadget_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
2111{
2112 u32 reg;
2113
2114 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
2115
2116 if (suspend)
2117 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
2118 else
2119 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
2120
2121 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
2122}
2123
2124 static void dwc3_gadget_usb2_phy_suspend(struct dwc3 *dwc, int suspend)
2125{
2126 u32 reg;
2127
2128 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
2129
2130 if (suspend)
2131 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
2132 else
2133 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
2134
2135 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
2136}
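/*
 * Editorial sketch, not part of gadget.c: the read-modify-write pattern that
 * both PHY suspend helpers above follow, reduced to the bit manipulation.
 * The helper name is invented; the real code operates on GUSB3PIPECTL(0) and
 * GUSB2PHYCFG(0) through dwc3_readl()/dwc3_writel().
 */
static u32 example_set_clear_bit(u32 reg, u32 bit, int set)
{
	if (set)
		reg |= bit;
	else
		reg &= ~bit;

	return reg;
}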
2137
2138static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2139{
2140 u32 reg;
2141
2142 dev_vdbg(dwc->dev, "%s\n", __func__);
2143
2144 /*
2145 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2146 * would cause a missing Disconnect Event if there's a
2147 * pending Setup Packet in the FIFO.
2148 *
2149 * There's no suggested workaround on the official Bug
2150 * report, which states that "unless the driver/application
2151 * is doing any special handling of a disconnect event,
2152 * there is no functional issue".
2153 *
2154 * Unfortunately, it turns out that we _do_ some special
2155 * handling of a disconnect event, namely complete all
2156 * pending transfers, notify gadget driver of the
2157 * disconnection, and so on.
2158 *
2159 * Our suggested workaround is to follow the Disconnect
2160 * Event steps here, instead, based on a setup_packet_pending
2161 * flag. Such flag gets set whenever we have a XferNotReady
2162 * event on EP0 and gets cleared on XferComplete for the
2163 * same endpoint.
2164 *
2165 * Refers to:
2166 *
2167 * STAR#9000466709: RTL: Device : Disconnect event not
2168 * generated if setup packet pending in FIFO
2169 */
2170 if (dwc->revision < DWC3_REVISION_188A) {
2171 if (dwc->setup_packet_pending)
2172 dwc3_gadget_disconnect_interrupt(dwc);
2173 }
2174
2175 /* after reset -> Default State */
2176 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
2177
2178 /* Recent versions support automatic phy suspend and don't need this */
2179 if (dwc->revision < DWC3_REVISION_194A) {
2180 /* Resume PHYs */
2181 dwc3_gadget_usb2_phy_suspend(dwc, false);
2182 dwc3_gadget_usb3_phy_suspend(dwc, false);
2183 }
2184
2185 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2186 dwc3_disconnect_gadget(dwc);
2187
2188 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2189 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2190 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2191 dwc->test_mode = false;
2192
2193 dwc3_stop_active_transfers(dwc);
2194 dwc3_clear_stall_all_ep(dwc);
2195 dwc->start_config_issued = false;
2196
2197 /* Reset device address to zero */
2198 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2199 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2200 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2201}
2202
2203static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2204{
2205 u32 reg;
2206 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2207
2208 /*
2209 * We change the clock only at SS, though it is not clear why that is
2210 * needed. Maybe it becomes part of the power saving plan.
2211 */
2212
2213 if (speed != DWC3_DSTS_SUPERSPEED)
2214 return;
2215
2216 /*
2217 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2218 * each time on Connect Done.
2219 */
2220 if (!usb30_clock)
2221 return;
2222
2223 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2224 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2225 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2226}
2227
2228 static void dwc3_gadget_phy_suspend(struct dwc3 *dwc, u8 speed)
2229{
2230 switch (speed) {
2231 case USB_SPEED_SUPER:
2232 dwc3_gadget_usb2_phy_suspend(dwc, true);
2233 break;
2234 case USB_SPEED_HIGH:
2235 case USB_SPEED_FULL:
2236 case USB_SPEED_LOW:
2237 dwc3_gadget_usb3_phy_suspend(dwc, true);
2238 break;
2239 }
2240}
2241
2242static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2243{
2244 struct dwc3_ep *dep;
2245 int ret;
2246 u32 reg;
2247 u8 speed;
2248
2249 dev_vdbg(dwc->dev, "%s\n", __func__);
2250
2251 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2252 speed = reg & DWC3_DSTS_CONNECTSPD;
2253 dwc->speed = speed;
2254
2255 dwc3_update_ram_clk_sel(dwc, speed);
2256
2257 switch (speed) {
2258 case DWC3_DCFG_SUPERSPEED:
2259 /*
2260 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2261 * would cause a missing USB3 Reset event.
2262 *
2263 * In such situations, we should force a USB3 Reset
2264 * event by calling our dwc3_gadget_reset_interrupt()
2265 * routine.
2266 *
2267 * Refers to:
2268 *
2269 * STAR#9000483510: RTL: SS : USB3 reset event may
2270 * not be generated always when the link enters poll
2271 */
2272 if (dwc->revision < DWC3_REVISION_190A)
2273 dwc3_gadget_reset_interrupt(dwc);
2274
2275 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2276 dwc->gadget.ep0->maxpacket = 512;
2277 dwc->gadget.speed = USB_SPEED_SUPER;
2278 break;
2279 case DWC3_DCFG_HIGHSPEED:
2280 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2281 dwc->gadget.ep0->maxpacket = 64;
2282 dwc->gadget.speed = USB_SPEED_HIGH;
2283 break;
2284 case DWC3_DCFG_FULLSPEED2:
2285 case DWC3_DCFG_FULLSPEED1:
2286 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2287 dwc->gadget.ep0->maxpacket = 64;
2288 dwc->gadget.speed = USB_SPEED_FULL;
2289 break;
2290 case DWC3_DCFG_LOWSPEED:
2291 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2292 dwc->gadget.ep0->maxpacket = 8;
2293 dwc->gadget.speed = USB_SPEED_LOW;
2294 break;
2295 }
2296
2297 /* Enable USB2 LPM Capability */
2298
2299 if ((dwc->revision > DWC3_REVISION_194A)
2300 && (speed != DWC3_DCFG_SUPERSPEED)) {
2301 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2302 reg |= DWC3_DCFG_LPM_CAP;
2303 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2304
2305 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2306 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2307
2308 /*
2309 * TODO: This should be configurable. For now using
2310 * maximum allowed HIRD threshold value of 0b1100
2311 */
2312 reg |= DWC3_DCTL_HIRD_THRES(12);
2313
2314 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2315 }
2316
2317 /* Recent versions support automatic phy suspend and don't need this */
2318 if (dwc->revision < DWC3_REVISION_194A) {
2319 /* Suspend unneeded PHY */
2320 dwc3_gadget_phy_suspend(dwc, dwc->gadget.speed);
2321 }
2322
2323 dep = dwc->eps[0];
2324 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
2325 if (ret) {
2326 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2327 return;
2328 }
2329
2330 dep = dwc->eps[1];
2331 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
2332 if (ret) {
2333 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2334 return;
2335 }
2336
2337 /*
2338 * Configure PHY via GUSB3PIPECTLn if required.
2339 *
2340 * Update GTXFIFOSIZn
2341 *
2342 * In both cases reset values should be sufficient.
2343 */
2344}
2345
2346static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2347{
2348 dev_vdbg(dwc->dev, "%s\n", __func__);
2349
2350 /*
2351 * TODO take core out of low power mode when that's
2352 * implemented.
2353 */
2354
2355 dwc->gadget_driver->resume(&dwc->gadget);
2356}
2357
2358static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2359 unsigned int evtinfo)
2360{
2361 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2362 unsigned int pwropt;
2363
2364 /*
2365 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2366 * Hibernation mode enabled which would show up when device detects
2367 * host-initiated U3 exit.
2368 *
2369 * In that case, device will generate a Link State Change Interrupt
2370 * from U3 to RESUME which is only necessary if Hibernation is
2371 * configured in.
2372 *
2373 * There are no functional changes due to such spurious event and we
2374 * just need to ignore it.
2375 *
2376 * Refers to:
2377 *
2378 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2379 * operational mode
2380 */
2381 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2382 if ((dwc->revision < DWC3_REVISION_250A) &&
2383 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2384 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2385 (next == DWC3_LINK_STATE_RESUME)) {
2386 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2387 return;
2388 }
2389 }
2390
2391 /*
2392 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2393 * on the link partner, the USB session might do multiple entry/exit
2394 * of low power states before a transfer takes place.
2395 *
2396 * Due to this problem, we might experience lower throughput. The
2397 * suggested workaround is to disable DCTL[12:9] bits if we're
2398 * transitioning from U1/U2 to U0 and enable those bits again
2399 * after a transfer completes and there are no pending transfers
2400 * on any of the enabled endpoints.
2401 *
2402 * This is the first half of that workaround.
2403 *
2404 * Refers to:
2405 *
2406 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2407 * core send LGO_Ux entering U0
2408 */
2409 if (dwc->revision < DWC3_REVISION_183A) {
2410 if (next == DWC3_LINK_STATE_U0) {
2411 u32 u1u2;
2412 u32 reg;
2413
2414 switch (dwc->link_state) {
2415 case DWC3_LINK_STATE_U1:
2416 case DWC3_LINK_STATE_U2:
2417 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2418 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2419 | DWC3_DCTL_ACCEPTU2ENA
2420 | DWC3_DCTL_INITU1ENA
2421 | DWC3_DCTL_ACCEPTU1ENA);
2422
2423 if (!dwc->u1u2)
2424 dwc->u1u2 = reg & u1u2;
2425
2426 reg &= ~u1u2;
2427
2428 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2429 break;
2430 default:
2431 /* do nothing */
2432 break;
2433 }
2434 }
2435 }
2436
2437 dwc->link_state = next;
2438
2439 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
2440}
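/*
 * Editorial sketch, not part of gadget.c: the register bookkeeping shared by
 * the two halves of the U1/U2 -> U0 workaround (first half above, second
 * half in dwc3_endpoint_transfer_complete()). "saved" plays the role of
 * dwc->u1u2 and the mask stands in for the INITU1/INITU2/ACCEPTU1/ACCEPTU2
 * enable bits; names and values here are illustrative only.
 */
#define EXAMPLE_U1U2_MASK	0x00000f00

/* 1st half: on U1/U2 -> U0, remember which bits were set and clear them */
static u32 example_u1u2_disable(u32 dctl, u32 *saved)
{
	if (!*saved)
		*saved = dctl & EXAMPLE_U1U2_MASK;

	return dctl & ~EXAMPLE_U1U2_MASK;
}

/* 2nd half: once all endpoint queues are empty, restore the saved bits */
static u32 example_u1u2_restore(u32 dctl, u32 *saved)
{
	dctl |= *saved;
	*saved = 0;

	return dctl;
}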
2441
2442static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2443 const struct dwc3_event_devt *event)
2444{
2445 switch (event->type) {
2446 case DWC3_DEVICE_EVENT_DISCONNECT:
2447 dwc3_gadget_disconnect_interrupt(dwc);
2448 break;
2449 case DWC3_DEVICE_EVENT_RESET:
2450 dwc3_gadget_reset_interrupt(dwc);
2451 break;
2452 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2453 dwc3_gadget_conndone_interrupt(dwc);
2454 break;
2455 case DWC3_DEVICE_EVENT_WAKEUP:
2456 dwc3_gadget_wakeup_interrupt(dwc);
2457 break;
2458 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2459 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2460 break;
2461 case DWC3_DEVICE_EVENT_EOPF:
2462 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2463 break;
2464 case DWC3_DEVICE_EVENT_SOF:
2465 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2466 break;
2467 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2468 dev_vdbg(dwc->dev, "Erratic Error\n");
2469 break;
2470 case DWC3_DEVICE_EVENT_CMD_CMPL:
2471 dev_vdbg(dwc->dev, "Command Complete\n");
2472 break;
2473 case DWC3_DEVICE_EVENT_OVERFLOW:
2474 dev_vdbg(dwc->dev, "Overflow\n");
2475 break;
2476 default:
2477 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2478 }
2479}
2480
2481static void dwc3_process_event_entry(struct dwc3 *dwc,
2482 const union dwc3_event *event)
2483{
2484 /* Endpoint IRQ, handle it and return early */
2485 if (event->type.is_devspec == 0) {
2486 /* depevt */
2487 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2488 }
2489
2490 switch (event->type.type) {
2491 case DWC3_EVENT_TYPE_DEV:
2492 dwc3_gadget_interrupt(dwc, &event->devt);
2493 break;
2494 /* REVISIT what to do with Carkit and I2C events ? */
2495 default:
2496 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2497 }
2498}
2499
2500static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2501{
2502 struct dwc3 *dwc = _dwc;
2503 unsigned long flags;
2504 irqreturn_t ret = IRQ_NONE;
2505 int i;
2506
2507 spin_lock_irqsave(&dwc->lock, flags);
2508
2509 for (i = 0; i < dwc->num_event_buffers; i++) {
2510 struct dwc3_event_buffer *evt;
2511 int left;
2512
2513 evt = dwc->ev_buffs[i];
2514 left = evt->count;
2515
2516 if (!(evt->flags & DWC3_EVENT_PENDING))
2517 continue;
2518
2519 while (left > 0) {
2520 union dwc3_event event;
2521
2522 event.raw = *(u32 *) (evt->buf + evt->lpos);
2523
2524 dwc3_process_event_entry(dwc, &event);
2525
2526 /*
2527 * FIXME we wrap around correctly to the next entry as
2528 * almost all entries are 4 bytes in size. There is one
2529 * entry which has 12 bytes which is a regular entry
2530 * followed by 8 bytes data. ATM I don't know how
2531 * things are organized if we get next to a
2532 * boundary so I worry about that once we try to handle
2533 * that.
2534 */
2535 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2536 left -= 4;
2537
2538 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(i), 4);
2539 }
2540
2541 evt->count = 0;
2542 evt->flags &= ~DWC3_EVENT_PENDING;
2543 ret = IRQ_HANDLED;
2544 }
2545
2546 spin_unlock_irqrestore(&dwc->lock, flags);
2547
2548 return ret;
2549}
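/*
 * Editorial sketch, not part of gadget.c: the consumption step from the
 * thread handler above. Every event entry handled here is 4 bytes, the read
 * position wraps at the event buffer size, and the handled bytes are then
 * acknowledged via GEVNTCOUNT (the register write is omitted in this
 * sketch). The size macro is a stand-in for DWC3_EVENT_BUFFERS_SIZE.
 */
#define EXAMPLE_EVT_BUF_SIZE	4096

static unsigned int example_consume_event(unsigned int lpos, int *left)
{
	*left -= 4;		/* one 4-byte event entry handled */

	return (lpos + 4) % EXAMPLE_EVT_BUF_SIZE;
}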
2550
2551static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2552{
2553 struct dwc3_event_buffer *evt;
2554 u32 count;
2555
2556 evt = dwc->ev_buffs[buf];
2557
2558 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2559 count &= DWC3_GEVNTCOUNT_MASK;
2560 if (!count)
2561 return IRQ_NONE;
2562
2563 evt->count = count;
2564 evt->flags |= DWC3_EVENT_PENDING;
2565
2566 return IRQ_WAKE_THREAD;
2567}
2568
2569static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2570{
2571 struct dwc3 *dwc = _dwc;
2572 int i;
2573 irqreturn_t ret = IRQ_NONE;
2574
2575 spin_lock(&dwc->lock);
2576
2577 for (i = 0; i < dwc->num_event_buffers; i++) {
2578 irqreturn_t status;
2579
2580 status = dwc3_process_event_buf(dwc, i);
2581 if (status == IRQ_WAKE_THREAD)
2582 ret = status;
2583 }
2584
2585 spin_unlock(&dwc->lock);
2586
2587 return ret;
2588}
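/*
 * Editorial sketch, not part of this section: how a hard handler that
 * returns IRQ_WAKE_THREAD (dwc3_interrupt above) and a threaded handler
 * (dwc3_thread_interrupt above) are typically wired together with
 * request_threaded_irq(). For dwc3 the actual registration happens in the
 * gadget start path, not here; the irq number, flags and name below are
 * placeholders.
 */
static int example_register_dwc3_irq(struct dwc3 *dwc, int irq)
{
	return request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc);
}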
2589
2590/**
2591 * dwc3_gadget_init - Initializes gadget related registers
2592 * @dwc: pointer to our controller context structure
2593 *
2594 * Returns 0 on success otherwise negative errno.
2595 */
2596 int dwc3_gadget_init(struct dwc3 *dwc)
2597{
2598 u32 reg;
2599 int ret;
2600
2601 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2602 &dwc->ctrl_req_addr, GFP_KERNEL);
2603 if (!dwc->ctrl_req) {
2604 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2605 ret = -ENOMEM;
2606 goto err0;
2607 }
2608
2609 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2610 &dwc->ep0_trb_addr, GFP_KERNEL);
2611 if (!dwc->ep0_trb) {
2612 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2613 ret = -ENOMEM;
2614 goto err1;
2615 }
2616
2617 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2618 if (!dwc->setup_buf) {
2619 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2620 ret = -ENOMEM;
2621 goto err2;
2622 }
2623
2624 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2625 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2626 GFP_KERNEL);
2627 if (!dwc->ep0_bounce) {
2628 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2629 ret = -ENOMEM;
2630 goto err3;
2631 }
2632
2633 dwc->gadget.ops = &dwc3_gadget_ops;
2634 dwc->gadget.max_speed = USB_SPEED_SUPER;
2635 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2636 dwc->gadget.sg_supported = true;
2637 dwc->gadget.name = "dwc3-gadget";
2638
2639 /*
2640 * REVISIT: Here we should clear all pending IRQs to be
2641 * sure we're starting from a well known location.
2642 */
2643
2644 ret = dwc3_gadget_init_endpoints(dwc);
2645 if (ret)
2646 goto err4;
2647
2648 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2649 reg |= DWC3_DCFG_LPM_CAP;
2650 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2651
2652 /* Enable USB2 LPM and automatic phy suspend only on recent versions */
2653 if (dwc->revision >= DWC3_REVISION_194A) {
2654 dwc3_gadget_usb2_phy_suspend(dwc, false);
2655 dwc3_gadget_usb3_phy_suspend(dwc, false);
2656 }
2657
2658 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2659 if (ret) {
2660 dev_err(dwc->dev, "failed to register udc\n");
2661 goto err5;
2662 }
2663
2664 return 0;
2665
2666 err5:
2667 dwc3_gadget_free_endpoints(dwc);
2668
2669 err4:
2670 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2671 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2672
2673 err3:
2674 kfree(dwc->setup_buf);
2675
2676err2:
2677 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2678 dwc->ep0_trb, dwc->ep0_trb_addr);
2679
2680err1:
2681 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2682 dwc->ctrl_req, dwc->ctrl_req_addr);
2683
2684err0:
2685 return ret;
2686}
2687
2688 /* -------------------------------------------------------------------------- */
2689
2690void dwc3_gadget_exit(struct dwc3 *dwc)
2691{
2692 usb_del_gadget_udc(&dwc->gadget);
2693
2694 dwc3_gadget_free_endpoints(dwc);
2695
2696 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2697 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2698
2699 kfree(dwc->setup_buf);
2700
2701 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2702 dwc->ep0_trb, dwc->ep0_trb_addr);
2703
2704 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2705 dwc->ctrl_req, dwc->ctrl_req_addr);
2706}
2707
2708int dwc3_gadget_prepare(struct dwc3 *dwc)
2709{
2710 if (dwc->pullups_connected)
2711 dwc3_gadget_disable_irq(dwc);
2712
2713 return 0;
2714}
2715
2716void dwc3_gadget_complete(struct dwc3 *dwc)
2717{
2718 if (dwc->pullups_connected) {
2719 dwc3_gadget_enable_irq(dwc);
2720 dwc3_gadget_run_stop(dwc, true);
2721 }
2722}
2723
2724int dwc3_gadget_suspend(struct dwc3 *dwc)
2725{
2726 __dwc3_gadget_ep_disable(dwc->eps[0]);
2727 __dwc3_gadget_ep_disable(dwc->eps[1]);
2728
2729 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2730
2731 return 0;
2732}
2733
2734int dwc3_gadget_resume(struct dwc3 *dwc)
2735{
2736 struct dwc3_ep *dep;
2737 int ret;
2738
2739 /* Start with SuperSpeed Default */
2740 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2741
2742 dep = dwc->eps[0];
2743 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2744 if (ret)
2745 goto err0;
2746
2747 dep = dwc->eps[1];
2748 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2749 if (ret)
2750 goto err1;
2751
2752 /* begin to receive SETUP packets */
2753 dwc->ep0state = EP0_SETUP_PHASE;
2754 dwc3_ep0_out_start(dwc);
2755
2756 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2757
2758 return 0;
2759
2760err1:
2761 __dwc3_gadget_ep_disable(dwc->eps[0]);
2762
2763err0:
2764 return ret;
2765}