drivers/usb/gadget/lpc32xx_udc.c: adjust inconsistent IS_ERR and PTR_ERR
1 /*
2 * USB Gadget driver for LPC32xx
3 *
4 * Authors:
5 * Kevin Wells <kevin.wells@nxp.com>
6 * Mike James
7 * Roland Stigge <stigge@antcom.de>
8 *
9 * Copyright (C) 2006 Philips Semiconductors
10 * Copyright (C) 2009 NXP Semiconductors
11 * Copyright (C) 2012 Roland Stigge
12 *
13 * Note: This driver is based on original work done by Mike James for
14 * the LPC3180.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 */
30
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/platform_device.h>
34 #include <linux/delay.h>
35 #include <linux/ioport.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
40 #include <linux/interrupt.h>
41 #include <linux/proc_fs.h>
42 #include <linux/clk.h>
43 #include <linux/usb/ch9.h>
44 #include <linux/usb/gadget.h>
45 #include <linux/i2c.h>
46 #include <linux/kthread.h>
47 #include <linux/freezer.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/dmapool.h>
50 #include <linux/workqueue.h>
51 #include <linux/of.h>
52 #include <linux/usb/isp1301.h>
53
54 #include <asm/byteorder.h>
55 #include <mach/hardware.h>
56 #include <linux/io.h>
57 #include <asm/irq.h>
58 #include <asm/system.h>
59
60 #include <mach/platform.h>
61 #include <mach/irqs.h>
62 #include <mach/board.h>
63 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
64 #include <linux/debugfs.h>
65 #include <linux/seq_file.h>
66 #endif
67
68 /*
69 * USB device configuration structure
70 */
71 typedef void (*usc_chg_event)(int);
72 struct lpc32xx_usbd_cfg {
73 int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */
74 usc_chg_event conn_chgb; /* Connection change event (optional) */
75 usc_chg_event susp_chgb; /* Suspend/resume event (optional) */
76 usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */
77 };
78
79 /*
80 * controller driver data structures
81 */
82
83 /* 16 endpoints (not to be confused with 32 hardware endpoints) */
84 #define NUM_ENDPOINTS 16
85
86 /*
87 * IRQ indices make reading the code a little easier
88 */
89 #define IRQ_USB_LP 0
90 #define IRQ_USB_HP 1
91 #define IRQ_USB_DEVDMA 2
92 #define IRQ_USB_ATX 3
93
94 #define EP_OUT 0 /* RX (from host) */
95 #define EP_IN 1 /* TX (to host) */
96
97 /* Returns the interrupt mask for the selected hardware endpoint */
98 #define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir))
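/* Hardware endpoints are used in OUT/IN pairs: logical endpoint (ep) maps to
 * hardware endpoint (ep * 2) for OUT (RX) and (ep * 2) + 1 for IN (TX),
 * with dir being EP_OUT or EP_IN */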
99
100 #define EP_INT_TYPE 0
101 #define EP_ISO_TYPE 1
102 #define EP_BLK_TYPE 2
103 #define EP_CTL_TYPE 3
104
105 /* EP0 states */
106 #define WAIT_FOR_SETUP 0 /* Wait for setup packet */
107 #define DATA_IN 1 /* Expect dev->host transfer */
108 #define DATA_OUT 2 /* Expect host->dev transfer */
109
110 /* DD (DMA Descriptor) structure, requires word alignment, this is already
111 * defined in the LPC32XX USB device header file, but this version is slightly
112 * modified to tag some work data with each DMA descriptor. */
113 struct lpc32xx_usbd_dd_gad {
114 u32 dd_next_phy;
115 u32 dd_setup;
116 u32 dd_buffer_addr;
117 u32 dd_status;
118 u32 dd_iso_ps_mem_addr;
119 u32 this_dma;
120 u32 iso_status[6]; /* 5 spare */
121 u32 dd_next_v;
122 };
123
124 /*
125 * Logical endpoint structure
126 */
127 struct lpc32xx_ep {
128 struct usb_ep ep;
129 struct list_head queue;
130 struct lpc32xx_udc *udc;
131
132 u32 hwep_num_base; /* Physical hardware EP */
133 u32 hwep_num; /* Maps to hardware endpoint */
134 u32 maxpacket;
135 u32 lep;
136
137 bool is_in;
138 bool req_pending;
139 u32 eptype;
140
141 u32 totalints;
142
143 bool wedge;
144
145 const struct usb_endpoint_descriptor *desc;
146 };
147
148 /*
149 * Common UDC structure
150 */
151 struct lpc32xx_udc {
152 struct usb_gadget gadget;
153 struct usb_gadget_driver *driver;
154 struct platform_device *pdev;
155 struct device *dev;
156 struct dentry *pde;
157 spinlock_t lock;
158 struct i2c_client *isp1301_i2c_client;
159
160 /* Board and device specific */
161 struct lpc32xx_usbd_cfg *board;
162 u32 io_p_start;
163 u32 io_p_size;
164 void __iomem *udp_baseaddr;
165 int udp_irq[4];
166 struct clk *usb_pll_clk;
167 struct clk *usb_slv_clk;
168 struct clk *usb_otg_clk;
169
170 /* DMA support */
171 u32 *udca_v_base;
172 u32 udca_p_base;
173 struct dma_pool *dd_cache;
174
175 /* Common EP and control data */
176 u32 enabled_devints;
177 u32 enabled_hwepints;
178 u32 dev_status;
179 u32 realized_eps;
180
181 /* VBUS detection, pullup, and power flags */
182 u8 vbus;
183 u8 last_vbus;
184 int pullup;
185 int poweron;
186
187 /* Work queues related to I2C support */
188 struct work_struct pullup_job;
189 struct work_struct vbus_job;
190 struct work_struct power_job;
191
192 /* USB device peripheral - various */
193 struct lpc32xx_ep ep[NUM_ENDPOINTS];
194 bool enabled;
195 bool clocked;
196 bool suspended;
197 bool selfpowered;
198 int ep0state;
199 atomic_t enabled_ep_cnt;
200 wait_queue_head_t ep_disable_wait_queue;
201 };
202
203 /*
204 * Endpoint request
205 */
206 struct lpc32xx_request {
207 struct usb_request req;
208 struct list_head queue;
209 struct lpc32xx_usbd_dd_gad *dd_desc_ptr;
210 bool mapped;
211 bool send_zlp;
212 };
213
214 static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
215 {
216 return container_of(g, struct lpc32xx_udc, gadget);
217 }
218
219 #define ep_dbg(epp, fmt, arg...) \
220 dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg)
221 #define ep_err(epp, fmt, arg...) \
222 dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg)
223 #define ep_info(epp, fmt, arg...) \
224 dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg)
225 #define ep_warn(epp, fmt, arg...) \
226 dev_warn(epp->udc->dev, "%s: " fmt, __func__, ## arg)
227
228 #define UDCA_BUFF_SIZE (128)
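/* The UDCA (USB Device Communication Area) holds one DMA Descriptor pointer
 * per hardware endpoint (32 pointers * 4 bytes = 128 bytes); its base address
 * is programmed into USBD_UDCAH */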
229
230 /* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
231 * be replaced with an ioremap()ed pointer
232 */
233 #define USB_CTRL IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)
234
235 /* USB_CTRL bit defines */
236 #define USB_SLAVE_HCLK_EN (1 << 24)
237 #define USB_HOST_NEED_CLK_EN (1 << 21)
238 #define USB_DEV_NEED_CLK_EN (1 << 22)
239
240 /**********************************************************************
241 * USB device controller register offsets
242 **********************************************************************/
243
244 #define USBD_DEVINTST(x) ((x) + 0x200)
245 #define USBD_DEVINTEN(x) ((x) + 0x204)
246 #define USBD_DEVINTCLR(x) ((x) + 0x208)
247 #define USBD_DEVINTSET(x) ((x) + 0x20C)
248 #define USBD_CMDCODE(x) ((x) + 0x210)
249 #define USBD_CMDDATA(x) ((x) + 0x214)
250 #define USBD_RXDATA(x) ((x) + 0x218)
251 #define USBD_TXDATA(x) ((x) + 0x21C)
252 #define USBD_RXPLEN(x) ((x) + 0x220)
253 #define USBD_TXPLEN(x) ((x) + 0x224)
254 #define USBD_CTRL(x) ((x) + 0x228)
255 #define USBD_DEVINTPRI(x) ((x) + 0x22C)
256 #define USBD_EPINTST(x) ((x) + 0x230)
257 #define USBD_EPINTEN(x) ((x) + 0x234)
258 #define USBD_EPINTCLR(x) ((x) + 0x238)
259 #define USBD_EPINTSET(x) ((x) + 0x23C)
260 #define USBD_EPINTPRI(x) ((x) + 0x240)
261 #define USBD_REEP(x) ((x) + 0x244)
262 #define USBD_EPIND(x) ((x) + 0x248)
263 #define USBD_EPMAXPSIZE(x) ((x) + 0x24C)
264 /* DMA support registers only below */
265 /* Set, clear, or get enabled state of the DMA request status. If
266 * enabled, an IN or OUT token will start a DMA transfer for the EP */
267 #define USBD_DMARST(x) ((x) + 0x250)
268 #define USBD_DMARCLR(x) ((x) + 0x254)
269 #define USBD_DMARSET(x) ((x) + 0x258)
270 /* DMA UDCA head pointer */
271 #define USBD_UDCAH(x) ((x) + 0x280)
272 /* EP DMA status, enable, and disable. This is used to specifically
273 * enable or disable DMA for a specific EP */
274 #define USBD_EPDMAST(x) ((x) + 0x284)
275 #define USBD_EPDMAEN(x) ((x) + 0x288)
276 #define USBD_EPDMADIS(x) ((x) + 0x28C)
277 /* DMA master interrupts enable and pending interrupts */
278 #define USBD_DMAINTST(x) ((x) + 0x290)
279 #define USBD_DMAINTEN(x) ((x) + 0x294)
280 /* DMA end of transfer interrupt enable, disable, status */
281 #define USBD_EOTINTST(x) ((x) + 0x2A0)
282 #define USBD_EOTINTCLR(x) ((x) + 0x2A4)
283 #define USBD_EOTINTSET(x) ((x) + 0x2A8)
284 /* New DD request interrupt enable, disable, status */
285 #define USBD_NDDRTINTST(x) ((x) + 0x2AC)
286 #define USBD_NDDRTINTCLR(x) ((x) + 0x2B0)
287 #define USBD_NDDRTINTSET(x) ((x) + 0x2B4)
288 /* DMA error interrupt enable, disable, status */
289 #define USBD_SYSERRTINTST(x) ((x) + 0x2B8)
290 #define USBD_SYSERRTINTCLR(x) ((x) + 0x2BC)
291 #define USBD_SYSERRTINTSET(x) ((x) + 0x2C0)
292
293 /**********************************************************************
294 * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
295 * USBD_DEVINTPRI register definitions
296 **********************************************************************/
297 #define USBD_ERR_INT (1 << 9)
298 #define USBD_EP_RLZED (1 << 8)
299 #define USBD_TXENDPKT (1 << 7)
300 #define USBD_RXENDPKT (1 << 6)
301 #define USBD_CDFULL (1 << 5)
302 #define USBD_CCEMPTY (1 << 4)
303 #define USBD_DEV_STAT (1 << 3)
304 #define USBD_EP_SLOW (1 << 2)
305 #define USBD_EP_FAST (1 << 1)
306 #define USBD_FRAME (1 << 0)
307
308 /**********************************************************************
309 * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
310 * USBD_EPINTPRI register definitions
311 **********************************************************************/
312 /* End point selection macro (RX) */
313 #define USBD_RX_EP_SEL(e) (1 << ((e) << 1))
314
315 /* End point selection macro (TX) */
316 #define USBD_TX_EP_SEL(e) (1 << (((e) << 1) + 1))
317
318 /**********************************************************************
319 * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
320 * USBD_EPDMAEN/USBD_EPDMADIS/
321 * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
322 * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
323 * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
324 * register definitions
325 **********************************************************************/
326 /* Endpoint selection macro */
327 #define USBD_EP_SEL(e) (1 << (e))
328
329 /**********************************************************************
330 * USBD_DMAINTST/USBD_DMAINTEN
331 **********************************************************************/
332 #define USBD_SYS_ERR_INT (1 << 2)
333 #define USBD_NEW_DD_INT (1 << 1)
334 #define USBD_EOT_INT (1 << 0)
335
336 /**********************************************************************
337 * USBD_RXPLEN register definitions
338 **********************************************************************/
339 #define USBD_PKT_RDY (1 << 11)
340 #define USBD_DV (1 << 10)
341 #define USBD_PK_LEN_MASK 0x3FF
342
343 /**********************************************************************
344 * USBD_CTRL register definitions
345 **********************************************************************/
346 #define USBD_LOG_ENDPOINT(e) ((e) << 2)
347 #define USBD_WR_EN (1 << 1)
348 #define USBD_RD_EN (1 << 0)
349
350 /**********************************************************************
351 * USBD_CMDCODE register definitions
352 **********************************************************************/
353 #define USBD_CMD_CODE(c) ((c) << 16)
354 #define USBD_CMD_PHASE(p) ((p) << 8)
355
356 /**********************************************************************
357 * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
358 **********************************************************************/
359 #define USBD_DMAEP(e) (1 << (e))
360
361 /* DD (DMA Descriptor) structure, requires word alignment */
362 struct lpc32xx_usbd_dd {
363 u32 *dd_next;
364 u32 dd_setup;
365 u32 dd_buffer_addr;
366 u32 dd_status;
367 u32 dd_iso_ps_mem_addr;
368 };
369
370 /* dd_setup bit defines */
371 #define DD_SETUP_ATLE_DMA_MODE 0x01
372 #define DD_SETUP_NEXT_DD_VALID 0x04
373 #define DD_SETUP_ISO_EP 0x10
374 #define DD_SETUP_PACKETLEN(n) (((n) & 0x7FF) << 5)
375 #define DD_SETUP_DMALENBYTES(n) (((n) & 0xFFFF) << 16)
376
377 /* dd_status bit defines */
378 #define DD_STATUS_DD_RETIRED 0x01
379 #define DD_STATUS_STS_MASK 0x1E
380 #define DD_STATUS_STS_NS 0x00 /* Not serviced */
381 #define DD_STATUS_STS_BS 0x02 /* Being serviced */
382 #define DD_STATUS_STS_NC 0x04 /* Normal completion */
383 #define DD_STATUS_STS_DUR 0x06 /* Data underrun (short packet) */
384 #define DD_STATUS_STS_DOR 0x08 /* Data overrun */
385 #define DD_STATUS_STS_SE 0x12 /* System error */
386 #define DD_STATUS_PKT_VAL 0x20 /* Packet valid */
387 #define DD_STATUS_LSB_EX 0x40 /* LS byte extracted (ATLE) */
388 #define DD_STATUS_MSB_EX 0x80 /* MS byte extracted (ATLE) */
389 #define DD_STATUS_MLEN(n) (((n) >> 8) & 0x3F)
390 #define DD_STATUS_CURDMACNT(n) (((n) >> 16) & 0xFFFF)
391
392 /*
393 *
394 * Protocol engine bits below
395 *
396 */
397 /* Device Interrupt Bit Definitions */
398 #define FRAME_INT 0x00000001
399 #define EP_FAST_INT 0x00000002
400 #define EP_SLOW_INT 0x00000004
401 #define DEV_STAT_INT 0x00000008
402 #define CCEMTY_INT 0x00000010
403 #define CDFULL_INT 0x00000020
404 #define RxENDPKT_INT 0x00000040
405 #define TxENDPKT_INT 0x00000080
406 #define EP_RLZED_INT 0x00000100
407 #define ERR_INT 0x00000200
408
409 /* Rx & Tx Packet Length Definitions */
410 #define PKT_LNGTH_MASK 0x000003FF
411 #define PKT_DV 0x00000400
412 #define PKT_RDY 0x00000800
413
414 /* USB Control Definitions */
415 #define CTRL_RD_EN 0x00000001
416 #define CTRL_WR_EN 0x00000002
417
418 /* Command Codes */
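/* Each command below packs the command/data byte into bits 23:16 and the
 * protocol engine phase into bits 15:8 (0x05 = command write, 0x02 = data
 * read, 0x01 = data write), per USBD_CMD_CODE()/USBD_CMD_PHASE() above */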
419 #define CMD_SET_ADDR 0x00D00500
420 #define CMD_CFG_DEV 0x00D80500
421 #define CMD_SET_MODE 0x00F30500
422 #define CMD_RD_FRAME 0x00F50500
423 #define DAT_RD_FRAME 0x00F50200
424 #define CMD_RD_TEST 0x00FD0500
425 #define DAT_RD_TEST 0x00FD0200
426 #define CMD_SET_DEV_STAT 0x00FE0500
427 #define CMD_GET_DEV_STAT 0x00FE0500
428 #define DAT_GET_DEV_STAT 0x00FE0200
429 #define CMD_GET_ERR_CODE 0x00FF0500
430 #define DAT_GET_ERR_CODE 0x00FF0200
431 #define CMD_RD_ERR_STAT 0x00FB0500
432 #define DAT_RD_ERR_STAT 0x00FB0200
433 #define DAT_WR_BYTE(x) (0x00000100 | ((x) << 16))
434 #define CMD_SEL_EP(x) (0x00000500 | ((x) << 16))
435 #define DAT_SEL_EP(x) (0x00000200 | ((x) << 16))
436 #define CMD_SEL_EP_CLRI(x) (0x00400500 | ((x) << 16))
437 #define DAT_SEL_EP_CLRI(x) (0x00400200 | ((x) << 16))
438 #define CMD_SET_EP_STAT(x) (0x00400500 | ((x) << 16))
439 #define CMD_CLR_BUF 0x00F20500
440 #define DAT_CLR_BUF 0x00F20200
441 #define CMD_VALID_BUF 0x00FA0500
442
443 /* Device Address Register Definitions */
444 #define DEV_ADDR_MASK 0x7F
445 #define DEV_EN 0x80
446
447 /* Device Configure Register Definitions */
448 #define CONF_DVICE 0x01
449
450 /* Device Mode Register Definitions */
451 #define AP_CLK 0x01
452 #define INAK_CI 0x02
453 #define INAK_CO 0x04
454 #define INAK_II 0x08
455 #define INAK_IO 0x10
456 #define INAK_BI 0x20
457 #define INAK_BO 0x40
458
459 /* Device Status Register Definitions */
460 #define DEV_CON 0x01
461 #define DEV_CON_CH 0x02
462 #define DEV_SUS 0x04
463 #define DEV_SUS_CH 0x08
464 #define DEV_RST 0x10
465
466 /* Error Code Register Definitions */
467 #define ERR_EC_MASK 0x0F
468 #define ERR_EA 0x10
469
470 /* Error Status Register Definitions */
471 #define ERR_PID 0x01
472 #define ERR_UEPKT 0x02
473 #define ERR_DCRC 0x04
474 #define ERR_TIMOUT 0x08
475 #define ERR_EOP 0x10
476 #define ERR_B_OVRN 0x20
477 #define ERR_BTSTF 0x40
478 #define ERR_TGL 0x80
479
480 /* Endpoint Select Register Definitions */
481 #define EP_SEL_F 0x01
482 #define EP_SEL_ST 0x02
483 #define EP_SEL_STP 0x04
484 #define EP_SEL_PO 0x08
485 #define EP_SEL_EPN 0x10
486 #define EP_SEL_B_1_FULL 0x20
487 #define EP_SEL_B_2_FULL 0x40
488
489 /* Endpoint Status Register Definitions */
490 #define EP_STAT_ST 0x01
491 #define EP_STAT_DA 0x20
492 #define EP_STAT_RF_MO 0x40
493 #define EP_STAT_CND_ST 0x80
494
495 /* Clear Buffer Register Definitions */
496 #define CLR_BUF_PO 0x01
497
498 /* DMA Interrupt Bit Definitions */
499 #define EOT_INT 0x01
500 #define NDD_REQ_INT 0x02
501 #define SYS_ERR_INT 0x04
502
503 #define DRIVER_VERSION "1.03"
504 static const char driver_name[] = "lpc32xx_udc";
505
506 /*
507 *
508 * proc interface support
509 *
510 */
511 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
512 static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
513 static const char debug_filename[] = "driver/udc";
514
515 static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
516 {
517 struct lpc32xx_request *req;
518
519 seq_printf(s, "\n");
520 seq_printf(s, "%12s, maxpacket %4d %3s",
521 ep->ep.name, ep->ep.maxpacket,
522 ep->is_in ? "in" : "out");
523 seq_printf(s, " type %4s", epnames[ep->eptype]);
524 seq_printf(s, " ints: %12d", ep->totalints);
525
526 if (list_empty(&ep->queue))
527 seq_printf(s, "\t(queue empty)\n");
528 else {
529 list_for_each_entry(req, &ep->queue, queue) {
530 u32 length = req->req.actual;
531
532 seq_printf(s, "\treq %p len %d/%d buf %p\n",
533 &req->req, length,
534 req->req.length, req->req.buf);
535 }
536 }
537 }
538
539 static int proc_udc_show(struct seq_file *s, void *unused)
540 {
541 struct lpc32xx_udc *udc = s->private;
542 struct lpc32xx_ep *ep;
543 unsigned long flags;
544
545 seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);
546
547 spin_lock_irqsave(&udc->lock, flags);
548
549 seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
550 udc->vbus ? "present" : "off",
551 udc->enabled ? (udc->vbus ? "active" : "enabled") :
552 "disabled",
553 udc->selfpowered ? "self" : "VBUS",
554 udc->suspended ? ", suspended" : "",
555 udc->driver ? udc->driver->driver.name : "(none)");
556
557 if (udc->enabled && udc->vbus) {
558 proc_ep_show(s, &udc->ep[0]);
559 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
560 if (ep->desc)
561 proc_ep_show(s, ep);
562 }
563 }
564
565 spin_unlock_irqrestore(&udc->lock, flags);
566
567 return 0;
568 }
569
570 static int proc_udc_open(struct inode *inode, struct file *file)
571 {
572 return single_open(file, proc_udc_show, PDE(inode)->data);
573 }
574
575 static const struct file_operations proc_ops = {
576 .owner = THIS_MODULE,
577 .open = proc_udc_open,
578 .read = seq_read,
579 .llseek = seq_lseek,
580 .release = single_release,
581 };
582
583 static void create_debug_file(struct lpc32xx_udc *udc)
584 {
585 udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
586 }
587
588 static void remove_debug_file(struct lpc32xx_udc *udc)
589 {
590 if (udc->pde)
591 debugfs_remove(udc->pde);
592 }
593
594 #else
595 static inline void create_debug_file(struct lpc32xx_udc *udc) {}
596 static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
597 #endif
598
599 /* Primary initialization sequence for the ISP1301 transceiver */
600 static void isp1301_udc_configure(struct lpc32xx_udc *udc)
601 {
602 /* LPC32XX only supports DAT_SE0 USB mode */
603 /* This sequence is important */
604
605 /* Disable transparent UART mode first */
606 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
607 (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
608 MC1_UART_EN);
609
610 /* Set full speed and SE0 mode */
611 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
612 (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
613 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
614 ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));
615
616 /*
617 * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
618 */
619 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
620 (ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
621 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
622 ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_SPD_SUSP_CTRL));
623
624 /* Drive VBUS_DRV high or low depending on board setup */
625 if (udc->board->vbus_drv_pol != 0)
626 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
627 ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
628 else
629 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
630 ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
631 OTG1_VBUS_DRV);
632
633 /* Bi-directional mode with suspend control
634 * Enable both pulldowns for now - the pullup will be enabled when VBUS
635 * is detected */
636 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
637 (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
638 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
639 ISP1301_I2C_OTG_CONTROL_1,
640 (0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
641
642 /* Discharge VBUS (just in case) */
643 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
644 ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
645 msleep(1);
646 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
647 (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
648 OTG1_VBUS_DISCHRG);
649
650 /* Clear and enable VBUS high edge interrupt */
651 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
652 ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
653 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
654 ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
655 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
656 ISP1301_I2C_INTERRUPT_FALLING, INT_VBUS_VLD);
657 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
658 ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
659 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
660 ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);
661
662 /* Enable usb_need_clk clock after transceiver is initialized */
663 writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL);
664
665 dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
666 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
667 dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
668 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02));
669 dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
670 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
671 }
672
673 /* Enables or disables the USB device pullup via the ISP1301 transceiver */
674 static void isp1301_pullup_set(struct lpc32xx_udc *udc)
675 {
676 if (udc->pullup)
677 /* Enable pullup for bus signalling */
678 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
679 ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
680 else
681 /* Disable pullup for bus signalling */
682 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
683 ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
684 OTG1_DP_PULLUP);
685 }
686
687 static void pullup_work(struct work_struct *work)
688 {
689 struct lpc32xx_udc *udc =
690 container_of(work, struct lpc32xx_udc, pullup_job);
691
692 isp1301_pullup_set(udc);
693 }
694
695 static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
696 int block)
697 {
698 if (en_pullup == udc->pullup)
699 return;
700
701 udc->pullup = en_pullup;
702 if (block)
703 isp1301_pullup_set(udc);
704 else
705 /* defer slow i2c pull up setting */
706 schedule_work(&udc->pullup_job);
707 }
708
709 #ifdef CONFIG_PM
710 /* Powers up or down the ISP1301 transceiver */
711 static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
712 {
713 if (enable != 0)
714 /* Power up the ISP1301 - it will automatically wake up
715 when VBUS is detected */
716 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
717 ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
718 MC2_GLOBAL_PWR_DN);
719 else
720 /* Power down ISP1301 */
721 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
722 ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
723 }
724
725 static void power_work(struct work_struct *work)
726 {
727 struct lpc32xx_udc *udc =
728 container_of(work, struct lpc32xx_udc, power_job);
729
730 isp1301_set_powerstate(udc, udc->poweron);
731 }
732 #endif
733
734 /*
735 *
736 * USB protocol engine command/data read/write helper functions
737 *
738 */
739 /* Issues a single command to the USB device state machine */
740 static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
741 {
742 u32 pass = 0;
743 int to;
744
745 /* EP may lock on CLRI if this read isn't done */
746 u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
747 (void) tmp;
748
749 while (pass == 0) {
750 writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));
751
752 /* Write command code */
753 writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
754 to = 10000;
755 while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
756 USBD_CCEMPTY) == 0) && (to > 0)) {
757 to--;
758 }
759
760 if (to > 0)
761 pass = 1;
762
763 cpu_relax();
764 }
765 }
766
767 /* Issues 2 commands (or command and data) to the USB device state machine */
768 static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
769 u32 data)
770 {
771 udc_protocol_cmd_w(udc, cmd);
772 udc_protocol_cmd_w(udc, data);
773 }
774
775 /* Issues a single command to the USB device state machine and reads
776 * response data */
777 static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
778 {
779 u32 tmp;
780 int to = 1000;
781
782 /* Write a command and read data from the protocol engine */
783 writel((USBD_CDFULL | USBD_CCEMPTY),
784 USBD_DEVINTCLR(udc->udp_baseaddr));
785
786 /* Write command code */
787 udc_protocol_cmd_w(udc, cmd);
788
789 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
790 while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
791 && (to > 0))
792 to--;
793 if (!to)
794 dev_dbg(udc->dev,
795 "Protocol engine didn't receive response (CDFULL)\n");
796
797 return readl(USBD_CMDDATA(udc->udp_baseaddr));
798 }
799
800 /*
801 *
802 * USB device interrupt mask support functions
803 *
804 */
805 /* Enable one or more USB device interrupts */
806 static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
807 {
808 udc->enabled_devints |= devmask;
809 writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
810 }
811
812 /* Disable one or more USB device interrupts */
813 static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
814 {
815 udc->enabled_devints &= ~mask;
816 writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
817 }
818
819 /* Clear one or more USB device interrupts */
820 static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
821 {
822 writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
823 }
824
825 /*
826 *
827 * Endpoint interrupt disable/enable functions
828 *
829 */
830 /* Enable one or more USB endpoint interrupts */
831 static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
832 {
833 udc->enabled_hwepints |= (1 << hwep);
834 writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
835 }
836
837 /* Disable one or more USB endpoint interrupts */
838 static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
839 {
840 udc->enabled_hwepints &= ~(1 << hwep);
841 writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
842 }
843
844 /* Clear one or more USB endpoint interrupts */
845 static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
846 {
847 writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
848 }
849
850 /* Enable DMA for the HW channel */
851 static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
852 {
853 writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
854 }
855
856 /* Disable DMA for the HW channel */
857 static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
858 {
859 writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
860 }
861
862 /*
863 *
864 * Endpoint realize/unrealize functions
865 *
866 */
867 /* Before an endpoint can be used, it needs to be realized
868 * in the USB protocol engine - this realizes the endpoint.
869 * The interrupt (FIFO or DMA) is not enabled with this function */
870 static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
871 u32 maxpacket)
872 {
873 int to = 1000;
874
875 writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
876 writel(hwep, USBD_EPIND(udc->udp_baseaddr));
877 udc->realized_eps |= (1 << hwep);
878 writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
879 writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));
880
881 /* Wait until endpoint is realized in hardware */
882 while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
883 USBD_EP_RLZED)) && (to > 0))
884 to--;
885 if (!to)
886 dev_dbg(udc->dev, "EP not correctly realized in hardware\n");
887
888 writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
889 }
890
891 /* Unrealize an EP */
892 static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
893 {
894 udc->realized_eps &= ~(1 << hwep);
895 writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
896 }
897
898 /*
899 *
900 * Endpoint support functions
901 *
902 */
903 /* Select and clear endpoint interrupt */
904 static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
905 {
906 udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
907 return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
908 }
909
910 /* Disables the endpoint in the USB protocol engine */
911 static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
912 {
913 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
914 DAT_WR_BYTE(EP_STAT_DA));
915 }
916
917 /* Stalls the endpoint - endpoint will return STALL */
918 static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
919 {
920 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
921 DAT_WR_BYTE(EP_STAT_ST));
922 }
923
924 /* Clear stall or reset endpoint */
925 static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
926 {
927 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
928 DAT_WR_BYTE(0));
929 }
930
931 /* Select an endpoint for endpoint status, clear, validate */
932 static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
933 {
934 udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
935 }
936
937 /*
938 *
939 * Endpoint buffer management functions
940 *
941 */
942 /* Clear the current endpoint's buffer */
943 static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
944 {
945 udc_select_hwep(udc, hwep);
946 udc_protocol_cmd_w(udc, CMD_CLR_BUF);
947 }
948
949 /* Validate the current endpoint's buffer */
950 static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
951 {
952 udc_select_hwep(udc, hwep);
953 udc_protocol_cmd_w(udc, CMD_VALID_BUF);
954 }
955
956 static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
957 {
958 /* Clear EP interrupt */
959 uda_clear_hwepint(udc, hwep);
960 return udc_selep_clrint(udc, hwep);
961 }
962
963 /*
964 *
965 * USB EP DMA support
966 *
967 */
968 /* Allocate a DMA Descriptor */
969 static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
970 {
971 dma_addr_t dma;
972 struct lpc32xx_usbd_dd_gad *dd;
973
974 dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
975 udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
976 if (dd)
977 dd->this_dma = dma;
978
979 return dd;
980 }
981
982 /* Free a DMA Descriptor */
983 static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
984 {
985 dma_pool_free(udc->dd_cache, dd, dd->this_dma);
986 }
987
988 /*
989 *
990 * USB setup and shutdown functions
991 *
992 */
993 /* Enables or disables most of the USB system clocks when low power mode is
994 * needed. Clocks are typically started on a connection event, and disabled
995 * when a cable is disconnected */
996 static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
997 {
998 if (enable != 0) {
999 if (udc->clocked)
1000 return;
1001
1002 udc->clocked = 1;
1003
1004 /* 48MHz PLL up */
1005 clk_enable(udc->usb_pll_clk);
1006
1007 /* Enable the USB device clock */
1008 writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
1009 USB_CTRL);
1010
1011 clk_enable(udc->usb_otg_clk);
1012 } else {
1013 if (!udc->clocked)
1014 return;
1015
1016 udc->clocked = 0;
1017
1018 /* Never disable the USB_HCLK during normal operation */
1019
1020 /* 48MHz PLL down */
1021 clk_disable(udc->usb_pll_clk);
1022
1023 /* Disable the USB device clock */
1024 writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
1025 USB_CTRL);
1026
1027 clk_disable(udc->usb_otg_clk);
1028 }
1029 }
1030
1031 /* Set/reset USB device address */
1032 static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
1033 {
1034 /* Address will be latched at the end of the status phase, or
1035 latched immediately if function is called twice */
1036 udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
1037 DAT_WR_BYTE(DEV_EN | addr));
1038 }
1039
1040 /* Set up an IN request for DMA transfer - this consists of determining the
1041 * list of DMA addresses for the transfer, allocating DMA Descriptors,
1042 * installing the DD into the UDCA, and then enabling the DMA for that EP */
1043 static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
1044 {
1045 struct lpc32xx_request *req;
1046 u32 hwep = ep->hwep_num;
1047
1048 ep->req_pending = 1;
1049
1050 /* There will always be a request waiting here */
1051 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1052
1053 /* Place the DD Descriptor into the UDCA */
1054 udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
1055
1056 /* Enable DMA and interrupt for the HW EP */
1057 udc_ep_dma_enable(udc, hwep);
1058
1059 /* Clear ZLP if last packet is not of MAXP size */
1060 if (req->req.length % ep->ep.maxpacket)
1061 req->send_zlp = 0;
1062
1063 return 0;
1064 }
1065
1066 /* Set up an OUT request for DMA transfer - this consists of determining the
1067 * list of DMA addresses for the transfer, allocating DMA Descriptors,
1068 * installing the DD into the UDCA, and then enabling the DMA for that EP */
1069 static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
1070 {
1071 struct lpc32xx_request *req;
1072 u32 hwep = ep->hwep_num;
1073
1074 ep->req_pending = 1;
1075
1076 /* There will always be a request waiting here */
1077 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1078
1079 /* Place the DD Descriptor into the UDCA */
1080 udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
1081
1082 /* Enable DMA and interrupt for the HW EP */
1083 udc_ep_dma_enable(udc, hwep);
1084 return 0;
1085 }
1086
1087 static void udc_disable(struct lpc32xx_udc *udc)
1088 {
1089 u32 i;
1090
1091 /* Disable device */
1092 udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
1093 udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));
1094
1095 /* Disable all device interrupts (including EP0) */
1096 uda_disable_devint(udc, 0x3FF);
1097
1098 /* Disable and reset all endpoint interrupts */
1099 for (i = 0; i < 32; i++) {
1100 uda_disable_hwepint(udc, i);
1101 uda_clear_hwepint(udc, i);
1102 udc_disable_hwep(udc, i);
1103 udc_unrealize_hwep(udc, i);
1104 udc->udca_v_base[i] = 0;
1105
1106 /* Disable and clear all interrupts and DMA */
1107 udc_ep_dma_disable(udc, i);
1108 writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
1109 writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
1110 writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1111 writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
1112 }
1113
1114 /* Disable DMA interrupts */
1115 writel(0, USBD_DMAINTEN(udc->udp_baseaddr));
1116
1117 writel(0, USBD_UDCAH(udc->udp_baseaddr));
1118 }
1119
1120 static void udc_enable(struct lpc32xx_udc *udc)
1121 {
1122 u32 i;
1123 struct lpc32xx_ep *ep = &udc->ep[0];
1124
1125 /* Start with known state */
1126 udc_disable(udc);
1127
1128 /* Enable device */
1129 udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));
1130
1131 /* EP interrupts on high priority, FRAME interrupt on low priority */
1132 writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
1133 writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));
1134
1135 /* Clear any pending device interrupts */
1136 writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));
1137
1138 /* Setup UDCA - not yet used (DMA) */
1139 writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));
1140
1141 /* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
1142 for (i = 0; i <= 1; i++) {
1143 udc_realize_hwep(udc, i, ep->ep.maxpacket);
1144 uda_enable_hwepint(udc, i);
1145 udc_select_hwep(udc, i);
1146 udc_clrstall_hwep(udc, i);
1147 udc_clr_buffer_hwep(udc, i);
1148 }
1149
1150 /* Device interrupt setup */
1151 uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
1152 USBD_EP_FAST));
1153 uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
1154 USBD_EP_FAST));
1155
1156 /* Set device address to 0 - called twice to force a latch in the USB
1157 engine without the need of a setup packet status closure */
1158 udc_set_address(udc, 0);
1159 udc_set_address(udc, 0);
1160
1161 /* Enable master DMA interrupts */
1162 writel((USBD_SYS_ERR_INT | USBD_EOT_INT),
1163 USBD_DMAINTEN(udc->udp_baseaddr));
1164
1165 udc->dev_status = 0;
1166 }
1167
1168 /*
1169 *
1170 * USB device board specific events handled via callbacks
1171 *
1172 */
1173 /* Connection change event - notify board function of change */
1174 static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
1175 {
1176 /* Just notify of a connection change event (optional) */
1177 if (udc->board->conn_chgb != NULL)
1178 udc->board->conn_chgb(conn);
1179 }
1180
1181 /* Suspend/resume event - notify board function of change */
1182 static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
1183 {
1184 /* Just notify of a Suspend/resume change event (optional) */
1185 if (udc->board->susp_chgb != NULL)
1186 udc->board->susp_chgb(conn);
1187
1188 if (conn)
1189 udc->suspended = 0;
1190 else
1191 udc->suspended = 1;
1192 }
1193
1194 /* Remote wakeup enable/disable - notify board function of change */
1195 static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
1196 {
1197 if (udc->board->rmwk_chgb != NULL)
1198 udc->board->rmwk_chgb(udc->dev_status &
1199 (1 << USB_DEVICE_REMOTE_WAKEUP));
1200 }
1201
1202 /* Reads data from FIFO, adjusts for alignment and data size */
1203 static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
1204 {
1205 int n, i, bl;
1206 u16 *p16;
1207 u32 *p32, tmp, cbytes;
1208
1209 /* Use optimal data transfer method based on source address and size */
1210 switch (((u32) data) & 0x3) {
1211 case 0: /* 32-bit aligned */
1212 p32 = (u32 *) data;
1213 cbytes = (bytes & ~0x3);
1214
1215 /* Copy 32-bit aligned data first */
1216 for (n = 0; n < cbytes; n += 4)
1217 *p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));
1218
1219 /* Handle any remaining bytes */
1220 bl = bytes - cbytes;
1221 if (bl) {
1222 tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1223 for (n = 0; n < bl; n++)
1224 data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
1225
1226 }
1227 break;
1228
1229 case 1: /* 8-bit aligned */
1230 case 3:
1231 /* Each byte has to be handled independently */
1232 for (n = 0; n < bytes; n += 4) {
1233 tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1234
1235 bl = bytes - n;
1236 if (bl > 4)
1237 bl = 4;
1238
1239 for (i = 0; i < bl; i++)
1240 data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
1241 }
1242 break;
1243
1244 case 2: /* 16-bit aligned */
1245 p16 = (u16 *) data;
1246 cbytes = (bytes & ~0x3);
1247
1248 /* Copy 32-bit sized objects first with 16-bit alignment */
1249 for (n = 0; n < cbytes; n += 4) {
1250 tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1251 *p16++ = (u16)(tmp & 0xFFFF);
1252 *p16++ = (u16)((tmp >> 16) & 0xFFFF);
1253 }
1254
1255 /* Handle any remaining bytes */
1256 bl = bytes - cbytes;
1257 if (bl) {
1258 tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1259 for (n = 0; n < bl; n++)
1260 data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
1261 }
1262 break;
1263 }
1264 }
1265
1266 /* Read data from the FIFO for an endpoint. This function is for endpoints (such
1267 * as EP0) that don't use DMA. This function should only be called if a packet
1268 * is known to be ready to read for the endpoint. Note that the endpoint must
1269 * be selected in the protocol engine prior to this call. */
1270 static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
1271 u32 bytes)
1272 {
1273 u32 tmpv;
1274 int to = 1000;
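/* USBD_CTRL takes the logical endpoint number in bits 5:2 (see
 * USBD_LOG_ENDPOINT()), so ((hwep & 0x1E) << 1) converts the hardware EP
 * index (two per logical EP) into that field */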
1275 u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;
1276
1277 /* Setup read of endpoint */
1278 writel(hwrep, USBD_CTRL(udc->udp_baseaddr));
1279
1280 /* Wait until packet is ready */
1281 while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
1282 PKT_RDY) == 0) && (to > 0))
1283 to--;
1284 if (!to)
1285 dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");
1286
1287 /* Mask out count */
1288 tmp = tmpv & PKT_LNGTH_MASK;
1289 if (bytes < tmp)
1290 tmp = bytes;
1291
1292 if ((tmp > 0) && (data != NULL))
1293 udc_pop_fifo(udc, (u8 *) data, tmp);
1294
1295 writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
1296
1297 /* Clear the buffer */
1298 udc_clr_buffer_hwep(udc, hwep);
1299
1300 return tmp;
1301 }
1302
1303 /* Stuffs data into the FIFO, adjusts for alignment and data size */
1304 static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
1305 {
1306 int n, i, bl;
1307 u16 *p16;
1308 u32 *p32, tmp, cbytes;
1309
1310 /* Use optimal data transfer method based on source address and size */
1311 switch (((u32) data) & 0x3) {
1312 case 0: /* 32-bit aligned */
1313 p32 = (u32 *) data;
1314 cbytes = (bytes & ~0x3);
1315
1316 /* Copy 32-bit aligned data first */
1317 for (n = 0; n < cbytes; n += 4)
1318 writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));
1319
1320 /* Handle any remaining bytes */
1321 bl = bytes - cbytes;
1322 if (bl) {
1323 tmp = 0;
1324 for (n = 0; n < bl; n++)
1325 tmp |= data[cbytes + n] << (n * 8);
1326
1327 writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1328 }
1329 break;
1330
1331 case 1: /* 8-bit aligned */
1332 case 3:
1333 /* Each byte has to be handled independently */
1334 for (n = 0; n < bytes; n += 4) {
1335 bl = bytes - n;
1336 if (bl > 4)
1337 bl = 4;
1338
1339 tmp = 0;
1340 for (i = 0; i < bl; i++)
1341 tmp |= data[n + i] << (i * 8);
1342
1343 writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1344 }
1345 break;
1346
1347 case 2: /* 16-bit aligned */
1348 p16 = (u16 *) data;
1349 cbytes = (bytes & ~0x3);
1350
1351 /* Copy 32-bit sized objects first with 16-bit alignment */
1352 for (n = 0; n < cbytes; n += 4) {
1353 tmp = *p16++ & 0xFFFF;
1354 tmp |= (*p16++ & 0xFFFF) << 16;
1355 writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1356 }
1357
1358 /* Handle any remaining bytes */
1359 bl = bytes - cbytes;
1360 if (bl) {
1361 tmp = 0;
1362 for (n = 0; n < bl; n++)
1363 tmp |= data[cbytes + n] << (n * 8);
1364
1365 writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1366 }
1367 break;
1368 }
1369 }
1370
1371 /* Write data to the FIFO for an endpoint. This function is for endpoints (such
1372 * as EP0) that don't use DMA. Note that the endpoint must be selected in the
1373 * protocol engine prior to this call. */
1374 static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
1375 u32 bytes)
1376 {
1377 u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;
1378
1379 if ((bytes > 0) && (data == NULL))
1380 return;
1381
1382 /* Setup write of endpoint */
1383 writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
1384
1385 writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));
1386
1387 /* Need at least 1 byte to trigger TX */
1388 if (bytes == 0)
1389 writel(0, USBD_TXDATA(udc->udp_baseaddr));
1390 else
1391 udc_stuff_fifo(udc, (u8 *) data, bytes);
1392
1393 writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
1394
1395 udc_val_buffer_hwep(udc, hwep);
1396 }
1397
1398 /* USB device reset - resets USB to a default state with just EP0
1399 enabled */
1400 static void uda_usb_reset(struct lpc32xx_udc *udc)
1401 {
1402 u32 i = 0;
1403 /* Re-init device controller and EP0 */
1404 udc_enable(udc);
1405 udc->gadget.speed = USB_SPEED_FULL;
1406
1407 for (i = 1; i < NUM_ENDPOINTS; i++) {
1408 struct lpc32xx_ep *ep = &udc->ep[i];
1409 ep->req_pending = 0;
1410 }
1411 }
1412
1413 /* Send a ZLP on EP0 */
1414 static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
1415 {
1416 udc_write_hwep(udc, EP_IN, NULL, 0);
1417 }
1418
1419 /* Get current frame number */
1420 static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
1421 {
1422 u16 flo, fhi;
1423
1424 udc_protocol_cmd_w(udc, CMD_RD_FRAME);
1425 flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
1426 fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
1427
1428 return (fhi << 8) | flo;
1429 }
1430
1431 /* Set the device as configured - enables all endpoints */
1432 static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
1433 {
1434 udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
1435 }
1436
1437 /* Set the device as unconfigured - disables all endpoints */
1438 static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
1439 {
1440 udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
1441 }
1442
1443 /* reinit == restore initial software state */
1444 static void udc_reinit(struct lpc32xx_udc *udc)
1445 {
1446 u32 i;
1447
1448 INIT_LIST_HEAD(&udc->gadget.ep_list);
1449 INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
1450
1451 for (i = 0; i < NUM_ENDPOINTS; i++) {
1452 struct lpc32xx_ep *ep = &udc->ep[i];
1453
1454 if (i != 0)
1455 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1456 ep->desc = NULL;
1457 ep->ep.maxpacket = ep->maxpacket;
1458 INIT_LIST_HEAD(&ep->queue);
1459 ep->req_pending = 0;
1460 }
1461
1462 udc->ep0state = WAIT_FOR_SETUP;
1463 }
1464
1465 /* Must be called with lock */
1466 static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
1467 {
1468 struct lpc32xx_udc *udc = ep->udc;
1469
1470 list_del_init(&req->queue);
1471 if (req->req.status == -EINPROGRESS)
1472 req->req.status = status;
1473 else
1474 status = req->req.status;
1475
1476 if (ep->lep) {
1477 enum dma_data_direction direction;
1478
1479 if (ep->is_in)
1480 direction = DMA_TO_DEVICE;
1481 else
1482 direction = DMA_FROM_DEVICE;
1483
1484 if (req->mapped) {
1485 dma_unmap_single(ep->udc->gadget.dev.parent,
1486 req->req.dma, req->req.length,
1487 direction);
1488 req->req.dma = 0;
1489 req->mapped = 0;
1490 } else
1491 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
1492 req->req.dma, req->req.length,
1493 direction);
1494
1495 /* Free DDs */
1496 udc_dd_free(udc, req->dd_desc_ptr);
1497 }
1498
1499 if (status && status != -ESHUTDOWN)
1500 ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);
1501
1502 ep->req_pending = 0;
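/* Drop the lock around the completion callback, which may re-enter the
 * driver (for example to queue another request) */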
1503 spin_unlock(&udc->lock);
1504 req->req.complete(&ep->ep, &req->req);
1505 spin_lock(&udc->lock);
1506 }
1507
1508 /* Must be called with lock */
1509 static void nuke(struct lpc32xx_ep *ep, int status)
1510 {
1511 struct lpc32xx_request *req;
1512
1513 while (!list_empty(&ep->queue)) {
1514 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1515 done(ep, req, status);
1516 }
1517
1518 if (ep->desc && status == -ESHUTDOWN) {
1519 uda_disable_hwepint(ep->udc, ep->hwep_num);
1520 udc_disable_hwep(ep->udc, ep->hwep_num);
1521 }
1522 }
1523
1524 /* IN endpoint 0 transfer */
1525 static int udc_ep0_in_req(struct lpc32xx_udc *udc)
1526 {
1527 struct lpc32xx_request *req;
1528 struct lpc32xx_ep *ep0 = &udc->ep[0];
1529 u32 tsend, ts = 0;
1530
1531 if (list_empty(&ep0->queue))
1532 /* Nothing to send */
1533 return 0;
1534 else
1535 req = list_entry(ep0->queue.next, struct lpc32xx_request,
1536 queue);
1537
1538 tsend = ts = req->req.length - req->req.actual;
1539 if (ts == 0) {
1540 /* Send a ZLP */
1541 udc_ep0_send_zlp(udc);
1542 done(ep0, req, 0);
1543 return 1;
1544 } else if (ts > ep0->ep.maxpacket)
1545 ts = ep0->ep.maxpacket; /* Just send what we can */
1546
1547 /* Write data to the EP0 FIFO and start transfer */
1548 udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);
1549
1550 /* Increment data pointer */
1551 req->req.actual += ts;
1552
1553 if (tsend >= ep0->ep.maxpacket)
1554 return 0; /* Stay in data transfer state */
1555
1556 /* Transfer request is complete */
1557 udc->ep0state = WAIT_FOR_SETUP;
1558 done(ep0, req, 0);
1559 return 1;
1560 }
1561
1562 /* OUT endpoint 0 transfer */
1563 static int udc_ep0_out_req(struct lpc32xx_udc *udc)
1564 {
1565 struct lpc32xx_request *req;
1566 struct lpc32xx_ep *ep0 = &udc->ep[0];
1567 u32 tr, bufferspace;
1568
1569 if (list_empty(&ep0->queue))
1570 return 0;
1571 else
1572 req = list_entry(ep0->queue.next, struct lpc32xx_request,
1573 queue);
1574
1575 if (req) {
1576 if (req->req.length == 0) {
1577 /* Just dequeue request */
1578 done(ep0, req, 0);
1579 udc->ep0state = WAIT_FOR_SETUP;
1580 return 1;
1581 }
1582
1583 /* Get data from FIFO */
1584 bufferspace = req->req.length - req->req.actual;
1585 if (bufferspace > ep0->ep.maxpacket)
1586 bufferspace = ep0->ep.maxpacket;
1587
1588 /* Copy data to buffer */
1589 prefetchw(req->req.buf + req->req.actual);
1590 tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
1591 bufferspace);
1592 req->req.actual += bufferspace;
1593
1594 if (tr < ep0->ep.maxpacket) {
1595 /* This is the last packet */
1596 done(ep0, req, 0);
1597 udc->ep0state = WAIT_FOR_SETUP;
1598 return 1;
1599 }
1600 }
1601
1602 return 0;
1603 }
1604
1605 /* Must be called with lock */
1606 static void stop_activity(struct lpc32xx_udc *udc)
1607 {
1608 struct usb_gadget_driver *driver = udc->driver;
1609 int i;
1610
1611 if (udc->gadget.speed == USB_SPEED_UNKNOWN)
1612 driver = NULL;
1613
1614 udc->gadget.speed = USB_SPEED_UNKNOWN;
1615 udc->suspended = 0;
1616
1617 for (i = 0; i < NUM_ENDPOINTS; i++) {
1618 struct lpc32xx_ep *ep = &udc->ep[i];
1619 nuke(ep, -ESHUTDOWN);
1620 }
1621 if (driver) {
1622 spin_unlock(&udc->lock);
1623 driver->disconnect(&udc->gadget);
1624 spin_lock(&udc->lock);
1625 }
1626
1627 isp1301_pullup_enable(udc, 0, 0);
1628 udc_disable(udc);
1629 udc_reinit(udc);
1630 }
1631
1632 /*
1633 * Activate or kill host pullup
1634 * Can be called with or without lock
1635 */
1636 static void pullup(struct lpc32xx_udc *udc, int is_on)
1637 {
1638 if (!udc->clocked)
1639 return;
1640
1641 if (!udc->enabled || !udc->vbus)
1642 is_on = 0;
1643
1644 if (is_on != udc->pullup)
1645 isp1301_pullup_enable(udc, is_on, 0);
1646 }
1647
1648 /* Must be called without lock */
1649 static int lpc32xx_ep_disable(struct usb_ep *_ep)
1650 {
1651 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1652 struct lpc32xx_udc *udc = ep->udc;
1653 unsigned long flags;
1654
1655 if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
1656 return -EINVAL;
1657 spin_lock_irqsave(&udc->lock, flags);
1658
1659 nuke(ep, -ESHUTDOWN);
1660
1661 /* restore the endpoint's pristine config */
1662 ep->desc = NULL;
1663
1664 /* Clear all DMA statuses for this EP */
1665 udc_ep_dma_disable(udc, ep->hwep_num);
1666 writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
1667 writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
1668 writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1669 writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
1670
1671 /* Remove the DD pointer in the UDCA */
1672 udc->udca_v_base[ep->hwep_num] = 0;
1673
1674 /* Disable and reset endpoint and interrupt */
1675 uda_clear_hwepint(udc, ep->hwep_num);
1676 udc_unrealize_hwep(udc, ep->hwep_num);
1677
1678 ep->hwep_num = 0;
1679
1680 spin_unlock_irqrestore(&udc->lock, flags);
1681
1682 atomic_dec(&udc->enabled_ep_cnt);
1683 wake_up(&udc->ep_disable_wait_queue);
1684
1685 return 0;
1686 }
1687
1688 /* Must be called without lock */
1689 static int lpc32xx_ep_enable(struct usb_ep *_ep,
1690 const struct usb_endpoint_descriptor *desc)
1691 {
1692 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1693 struct lpc32xx_udc *udc = ep->udc;
1694 u16 maxpacket;
1695 u32 tmp;
1696 unsigned long flags;
1697
1698 /* Verify EP data */
1699 if ((!_ep) || (!ep) || (!desc) || (ep->desc) ||
1700 (desc->bDescriptorType != USB_DT_ENDPOINT)) {
1701 dev_dbg(udc->dev, "bad ep or descriptor\n");
1702 return -EINVAL;
1703 }
1704 maxpacket = usb_endpoint_maxp(desc);
1705 if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
1706 dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
1707 return -EINVAL;
1708 }
1709
1710 /* Don't touch EP0 */
1711 if (ep->hwep_num_base == 0) {
1712 dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
1713 return -EINVAL;
1714 }
1715
1716 /* Is driver ready? */
1717 if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
1718 dev_dbg(udc->dev, "bogus device state\n");
1719 return -ESHUTDOWN;
1720 }
1721
1722 tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
1723 switch (tmp) {
1724 case USB_ENDPOINT_XFER_CONTROL:
1725 return -EINVAL;
1726
1727 case USB_ENDPOINT_XFER_INT:
1728 if (maxpacket > ep->maxpacket) {
1729 dev_dbg(udc->dev,
1730 "Bad INT endpoint maxpacket %d\n", maxpacket);
1731 return -EINVAL;
1732 }
1733 break;
1734
1735 case USB_ENDPOINT_XFER_BULK:
1736 switch (maxpacket) {
1737 case 8:
1738 case 16:
1739 case 32:
1740 case 64:
1741 break;
1742
1743 default:
1744 dev_dbg(udc->dev,
1745 "Bad BULK endpoint maxpacket %d\n", maxpacket);
1746 return -EINVAL;
1747 }
1748 break;
1749
1750 case USB_ENDPOINT_XFER_ISOC:
1751 break;
1752 }
1753 spin_lock_irqsave(&udc->lock, flags);
1754
1755 /* Initialize endpoint to match the selected descriptor */
1756 ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
1757 ep->desc = desc;
1758 ep->ep.maxpacket = maxpacket;
1759
1760 /* Map hardware endpoint from base and direction */
1761 if (ep->is_in)
1762 /* IN endpoints are offset 1 from the OUT endpoint */
1763 ep->hwep_num = ep->hwep_num_base + EP_IN;
1764 else
1765 ep->hwep_num = ep->hwep_num_base;
1766
1767 ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
1768 ep->hwep_num, maxpacket, (ep->is_in == 1));
1769
1770 /* Realize the endpoint, interrupt is enabled later when
1771 * buffers are queued, IN EPs will NAK until buffers are ready */
1772 udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
1773 udc_clr_buffer_hwep(udc, ep->hwep_num);
1774 uda_disable_hwepint(udc, ep->hwep_num);
1775 udc_clrstall_hwep(udc, ep->hwep_num);
1776
1777 /* Clear all DMA statuses for this EP */
1778 udc_ep_dma_disable(udc, ep->hwep_num);
1779 writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
1780 writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
1781 writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1782 writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
1783
1784 spin_unlock_irqrestore(&udc->lock, flags);
1785
1786 atomic_inc(&udc->enabled_ep_cnt);
1787 return 0;
1788 }
1789
1790 /*
1791 * Allocate a USB request list
1792 * Can be called with or without lock
1793 */
1794 static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
1795 gfp_t gfp_flags)
1796 {
1797 struct lpc32xx_request *req;
1798
1799 req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags);
1800 if (!req)
1801 return NULL;
1802
1803 INIT_LIST_HEAD(&req->queue);
1804 return &req->req;
1805 }
1806
1807 /*
1808 * De-allocate a USB request list
1809 * Can be called with or without lock
1810 */
1811 static void lpc32xx_ep_free_request(struct usb_ep *_ep,
1812 struct usb_request *_req)
1813 {
1814 struct lpc32xx_request *req;
1815
1816 req = container_of(_req, struct lpc32xx_request, req);
1817 BUG_ON(!list_empty(&req->queue));
1818 kfree(req);
1819 }
1820
1821 /* Must be called without lock */
1822 static int lpc32xx_ep_queue(struct usb_ep *_ep,
1823 struct usb_request *_req, gfp_t gfp_flags)
1824 {
1825 struct lpc32xx_request *req;
1826 struct lpc32xx_ep *ep;
1827 struct lpc32xx_udc *udc;
1828 unsigned long flags;
1829 int status = 0;
1830
1831 req = container_of(_req, struct lpc32xx_request, req);
1832 ep = container_of(_ep, struct lpc32xx_ep, ep);
1833
1834 if (!_req || !_req->complete || !_req->buf ||
1835 !list_empty(&req->queue))
1836 return -EINVAL;
1837
1838 udc = ep->udc;
1839
1840 if (!_ep || (!ep->desc && ep->hwep_num_base != 0)) {
1841 dev_dbg(udc->dev, "invalid ep\n");
1842 return -EINVAL;
1843 }
1844
1845
1846 if ((!udc) || (!udc->driver) ||
1847 (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
1848 dev_dbg(udc->dev, "invalid device\n");
1849 return -EINVAL;
1850 }
1851
1852 if (ep->lep) {
1853 enum dma_data_direction direction;
1854 struct lpc32xx_usbd_dd_gad *dd;
1855
1856 /* Map DMA pointer */
1857 if (ep->is_in)
1858 direction = DMA_TO_DEVICE;
1859 else
1860 direction = DMA_FROM_DEVICE;
1861
1862 if (req->req.dma == 0) {
1863 req->req.dma = dma_map_single(
1864 ep->udc->gadget.dev.parent,
1865 req->req.buf, req->req.length, direction);
1866 req->mapped = 1;
1867 } else {
1868 dma_sync_single_for_device(
1869 ep->udc->gadget.dev.parent, req->req.dma,
1870 req->req.length, direction);
1871 req->mapped = 0;
1872 }
1873
1874 /* For the request, build a list of DDs */
1875 dd = udc_dd_alloc(udc);
1876 if (!dd) {
1877 /* Error allocating DD */
1878 return -ENOMEM;
1879 }
1880 req->dd_desc_ptr = dd;
1881
1882 /* Setup the DMA descriptor */
1883 dd->dd_next_phy = dd->dd_next_v = 0;
1884 dd->dd_buffer_addr = req->req.dma;
1885 dd->dd_status = 0;
1886
1887 /* Special handling for ISO EPs */
1888 if (ep->eptype == EP_ISO_TYPE) {
1889 dd->dd_setup = DD_SETUP_ISO_EP |
1890 DD_SETUP_PACKETLEN(0) |
1891 DD_SETUP_DMALENBYTES(1);
1892 dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
1893 if (ep->is_in)
1894 dd->iso_status[0] = req->req.length;
1895 else
1896 dd->iso_status[0] = 0;
1897 } else
1898 dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
1899 DD_SETUP_DMALENBYTES(req->req.length);
1900 }
1901
1902 ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name,
1903 _req, _req->length, _req->buf, ep->is_in, _req->zero);
1904
1905 spin_lock_irqsave(&udc->lock, flags);
1906
1907 _req->status = -EINPROGRESS;
1908 _req->actual = 0;
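/* usb_request.zero is set by the function driver when the transfer must be
 * terminated with a zero-length packet (typically when the length is an
 * exact multiple of the endpoint's maxpacket size); remember it here so the
 * completion path can append the ZLP where needed. */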
1909 req->send_zlp = _req->zero;
1910
1911 /* Kickstart empty queues */
1912 if (list_empty(&ep->queue)) {
1913 list_add_tail(&req->queue, &ep->queue);
1914
1915 if (ep->hwep_num_base == 0) {
1916 /* Handle expected data direction */
1917 if (ep->is_in) {
1918 /* IN packet to host */
1919 udc->ep0state = DATA_IN;
1920 status = udc_ep0_in_req(udc);
1921 } else {
1922 /* OUT packet from host */
1923 udc->ep0state = DATA_OUT;
1924 status = udc_ep0_out_req(udc);
1925 }
1926 } else if (ep->is_in) {
1927 /* IN packet to host and kick off transfer */
1928 if (!ep->req_pending)
1929 udc_ep_in_req_dma(udc, ep);
1930 } else
1931 /* OUT packet from host and kick off list */
1932 if (!ep->req_pending)
1933 udc_ep_out_req_dma(udc, ep);
1934 } else
1935 list_add_tail(&req->queue, &ep->queue);
1936
1937 spin_unlock_irqrestore(&udc->lock, flags);
1938
1939 return (status < 0) ? status : 0;
1940 }
1941
1942 /* Must be called without lock */
1943 static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1944 {
1945 struct lpc32xx_ep *ep;
1946 struct lpc32xx_request *req;
1947 unsigned long flags;
1948
1949 ep = container_of(_ep, struct lpc32xx_ep, ep);
1950 if (!_ep || ep->hwep_num_base == 0)
1951 return -EINVAL;
1952
1953 spin_lock_irqsave(&ep->udc->lock, flags);
1954
1955 /* make sure it's actually queued on this endpoint */
1956 list_for_each_entry(req, &ep->queue, queue) {
1957 if (&req->req == _req)
1958 break;
1959 }
1960 if (&req->req != _req) {
1961 spin_unlock_irqrestore(&ep->udc->lock, flags);
1962 return -EINVAL;
1963 }
1964
1965 done(ep, req, -ECONNRESET);
1966
1967 spin_unlock_irqrestore(&ep->udc->lock, flags);
1968
1969 return 0;
1970 }
1971
1972 /* Must be called without lock */
1973 static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
1974 {
1975 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1976 struct lpc32xx_udc *udc = ep->udc;
1977 unsigned long flags;
1978
1979 if ((!ep) || (ep->desc == NULL) || (ep->hwep_num <= 1))
1980 return -EINVAL;
1981
1982 /* Don't halt an IN EP */
1983 if (ep->is_in)
1984 return -EAGAIN;
1985
1986 spin_lock_irqsave(&udc->lock, flags);
1987
1988 if (value == 1) {
1989 /* stall */
1990 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
1991 DAT_WR_BYTE(EP_STAT_ST));
1992 } else {
1993 /* End stall */
1994 ep->wedge = 0;
1995 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
1996 DAT_WR_BYTE(0));
1997 }
1998
1999 spin_unlock_irqrestore(&udc->lock, flags);
2000
2001 return 0;
2002 }
2003
2004 /* Set the halt feature and ignore clear requests */
2005 static int lpc32xx_ep_set_wedge(struct usb_ep *_ep)
2006 {
2007 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
2008
2009 if (!_ep || !ep->udc)
2010 return -EINVAL;
2011
2012 ep->wedge = 1;
2013
2014 return usb_ep_set_halt(_ep);
2015 }
2016
2017 static const struct usb_ep_ops lpc32xx_ep_ops = {
2018 .enable = lpc32xx_ep_enable,
2019 .disable = lpc32xx_ep_disable,
2020 .alloc_request = lpc32xx_ep_alloc_request,
2021 .free_request = lpc32xx_ep_free_request,
2022 .queue = lpc32xx_ep_queue,
2023 .dequeue = lpc32xx_ep_dequeue,
2024 .set_halt = lpc32xx_ep_set_halt,
2025 .set_wedge = lpc32xx_ep_set_wedge,
2026 };
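/*
 * Illustrative call flow (not part of this driver): function drivers reach
 * these operations through the usb_ep_* wrappers provided by the gadget
 * core, e.g. usb_ep_enable() -> lpc32xx_ep_enable(),
 * usb_ep_alloc_request() -> lpc32xx_ep_alloc_request() and
 * usb_ep_queue() -> lpc32xx_ep_queue().
 */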
2027
2028 /* Send a ZLP on a non-0 IN EP */
2029 void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2030 {
2031 /* Clear EP status */
2032 udc_clearep_getsts(udc, ep->hwep_num);
2033
2034 /* Send ZLP via FIFO mechanism */
2035 udc_write_hwep(udc, ep->hwep_num, NULL, 0);
2036 }
2037
2038 /*
2039 * Handle EP completion for ZLP
2040 * This function will only be called when a delayed ZLP needs to be sent out
2041 * after a DMA transfer has filled both buffers.
2042 */
2043 void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2044 {
2045 u32 epstatus;
2046 struct lpc32xx_request *req;
2047
2048 if (ep->hwep_num <= 0)
2049 return;
2050
2051 uda_clear_hwepint(udc, ep->hwep_num);
2052
2053 /* If this interrupt isn't enabled, return now */
2054 if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
2055 return;
2056
2057 /* Get endpoint status */
2058 epstatus = udc_clearep_getsts(udc, ep->hwep_num);
2059
2060 /*
2061 * This should never happen, but protect against writing to the
2062 * buffer when full.
2063 */
2064 if (epstatus & EP_SEL_F)
2065 return;
2066
2067 if (ep->is_in) {
2068 udc_send_in_zlp(udc, ep);
2069 uda_disable_hwepint(udc, ep->hwep_num);
2070 } else
2071 return;
2072
2073 /* If there isn't a request waiting, something went wrong */
2074 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
2075 if (req) {
2076 done(ep, req, 0);
2077
2078 /* Start another request if ready */
2079 if (!list_empty(&ep->queue)) {
2080 if (ep->is_in)
2081 udc_ep_in_req_dma(udc, ep);
2082 else
2083 udc_ep_out_req_dma(udc, ep);
2084 } else
2085 ep->req_pending = 0;
2086 }
2087 }
2088
2089
2090 /* DMA end of transfer completion */
2091 static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2092 {
2093 u32 status, epstatus;
2094 struct lpc32xx_request *req;
2095 struct lpc32xx_usbd_dd_gad *dd;
2096
2097 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
2098 ep->totalints++;
2099 #endif
2100
2101 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
2102 if (!req) {
2103 ep_err(ep, "DMA interrupt on no req!\n");
2104 return;
2105 }
2106 dd = req->dd_desc_ptr;
2107
2108 /* DMA descriptor should always be retired for this call */
2109 if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
2110 ep_warn(ep, "DMA descriptor did not retire\n");
2111
2112 /* Disable DMA */
2113 udc_ep_dma_disable(udc, ep->hwep_num);
2114 writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
2115 writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));
2116
2117 /* System error? */
2118 if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
2119 (1 << ep->hwep_num)) {
2120 writel((1 << ep->hwep_num),
2121 USBD_SYSERRTINTCLR(udc->udp_baseaddr));
2122 ep_err(ep, "AHB critical error!\n");
2123 ep->req_pending = 0;
2124
2125 /* The error could have occurred on a packet of a multipacket
2126 * transfer, so recovering the transfer is not possible. Close
2127 * the request with an error */
2128 done(ep, req, -ECONNABORTED);
2129 return;
2130 }
2131
2132 /* Handle the current DD's status */
2133 status = dd->dd_status;
2134 switch (status & DD_STATUS_STS_MASK) {
2135 case DD_STATUS_STS_NS:
2136 /* DD not serviced? This shouldn't happen! */
2137 ep->req_pending = 0;
2138 ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
2139 status);
2140
2141 done(ep, req, -ECONNABORTED);
2142 return;
2143
2144 case DD_STATUS_STS_BS:
2145 /* Interrupt only fires on EOT - This shouldn't happen! */
2146 ep->req_pending = 0;
2147 ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
2148 status);
2149 done(ep, req, -ECONNABORTED);
2150 return;
2151
2152 case DD_STATUS_STS_NC:
2153 case DD_STATUS_STS_DUR:
2154 /* Really just a short packet, not an underrun */
2155 /* This is a good status and what we expect */
2156 break;
2157
2158 default:
2159 /* Data overrun, system error, or unknown */
2160 ep->req_pending = 0;
2161 ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
2162 status);
2163 done(ep, req, -ECONNABORTED);
2164 return;
2165 }
2166
2167 /* ISO endpoints are handled differently */
2168 if (ep->eptype == EP_ISO_TYPE) {
2169 if (ep->is_in)
2170 req->req.actual = req->req.length;
2171 else
2172 req->req.actual = dd->iso_status[0] & 0xFFFF;
2173 } else
2174 req->req.actual += DD_STATUS_CURDMACNT(status);
2175
2176 /* Send a ZLP if necessary. This is needed for non-interrupt
2177 * packets whose size is an exact multiple of MAXP */
2178 if (req->send_zlp) {
2179 /*
2180 * If at least 1 buffer is available, send the ZLP now.
2181 * Otherwise, the ZLP send needs to be deferred until a
2182 * buffer is available.
2183 */
2184 if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
2185 udc_clearep_getsts(udc, ep->hwep_num);
2186 uda_enable_hwepint(udc, ep->hwep_num);
2187 epstatus = udc_clearep_getsts(udc, ep->hwep_num);
2188
2189 /* Let the EP interrupt handle the ZLP */
2190 return;
2191 } else
2192 udc_send_in_zlp(udc, ep);
2193 }
2194
2195 /* Transfer request is complete */
2196 done(ep, req, 0);
2197
2198 /* Start another request if ready */
2199 udc_clearep_getsts(udc, ep->hwep_num);
2200 if (!list_empty((&ep->queue))) {
2201 if (ep->is_in)
2202 udc_ep_in_req_dma(udc, ep);
2203 else
2204 udc_ep_out_req_dma(udc, ep);
2205 } else
2206 ep->req_pending = 0;
2207
2208 }
2209
2210 /*
2211 *
2212 * Endpoint 0 functions
2213 *
2214 */
2215 static void udc_handle_dev(struct lpc32xx_udc *udc)
2216 {
2217 u32 tmp;
2218
2219 udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
2220 tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
2221
2222 if (tmp & DEV_RST)
2223 uda_usb_reset(udc);
2224 else if (tmp & DEV_CON_CH)
2225 uda_power_event(udc, (tmp & DEV_CON));
2226 else if (tmp & DEV_SUS_CH) {
2227 if (tmp & DEV_SUS) {
2228 if (udc->vbus == 0)
2229 stop_activity(udc);
2230 else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
2231 udc->driver) {
2232 /* Power down transceiver */
2233 udc->poweron = 0;
2234 schedule_work(&udc->pullup_job);
2235 uda_resm_susp_event(udc, 1);
2236 }
2237 } else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
2238 udc->driver && udc->vbus) {
2239 uda_resm_susp_event(udc, 0);
2240 /* Power up transceiver */
2241 udc->poweron = 1;
2242 schedule_work(&udc->pullup_job);
2243 }
2244 }
2245 }
2246
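/* Build the 16-bit GET_STATUS response. Per the USB 2.0 spec the device
 * reply reports self-powered (bit 0) and remote-wakeup (bit 1) status,
 * while the endpoint reply reports the halt state (bit 0); the two bytes
 * are written back on EP0 IN below. */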
2247 static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
2248 {
2249 struct lpc32xx_ep *ep;
2250 u32 ep0buff = 0, tmp;
2251
2252 switch (reqtype & USB_RECIP_MASK) {
2253 case USB_RECIP_INTERFACE:
2254 break; /* Not supported */
2255
2256 case USB_RECIP_DEVICE:
2257 ep0buff = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
2258 if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
2259 ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
2260 break;
2261
2262 case USB_RECIP_ENDPOINT:
2263 tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
2264 ep = &udc->ep[tmp];
2265 if ((tmp == 0) || (tmp >= NUM_ENDPOINTS) || (tmp && !ep->desc))
2266 return -EOPNOTSUPP;
2267
2268 if (wIndex & USB_DIR_IN) {
2269 if (!ep->is_in)
2270 return -EOPNOTSUPP; /* Something's wrong */
2271 } else if (ep->is_in)
2272 return -EOPNOTSUPP; /* Not an IN endpoint */
2273
2274 /* Get status of the endpoint */
2275 udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
2276 tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));
2277
2278 if (tmp & EP_SEL_ST)
2279 ep0buff = (1 << USB_ENDPOINT_HALT);
2280 else
2281 ep0buff = 0;
2282 break;
2283
2284 default:
2285 break;
2286 }
2287
2288 /* Return data */
2289 udc_write_hwep(udc, EP_IN, &ep0buff, 2);
2290
2291 return 0;
2292 }
2293
2294 static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
2295 {
2296 struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
2297 struct usb_ctrlrequest ctrlpkt;
2298 int i, bytes;
2299 u16 wIndex, wValue, wLength, reqtype, req, tmp;
2300
2301 /* Nuke previous transfers */
2302 nuke(ep0, -EPROTO);
2303
2304 /* Get setup packet */
2305 bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
2306 if (bytes != 8) {
2307 ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n",
2308 bytes);
2309 return;
2310 }
2311
2312 /* Native endianness */
2313 wIndex = le16_to_cpu(ctrlpkt.wIndex);
2314 wValue = le16_to_cpu(ctrlpkt.wValue);
2315 wLength = le16_to_cpu(ctrlpkt.wLength);
2316 reqtype = le16_to_cpu(ctrlpkt.bRequestType);
2317
2318 /* Set direction of EP0 */
2319 if (likely(reqtype & USB_DIR_IN))
2320 ep0->is_in = 1;
2321 else
2322 ep0->is_in = 0;
2323
2324 /* Handle SETUP packet */
2325 req = le16_to_cpu(ctrlpkt.bRequest);
2326 switch (req) {
2327 case USB_REQ_CLEAR_FEATURE:
2328 case USB_REQ_SET_FEATURE:
2329 switch (reqtype) {
2330 case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2331 if (wValue != USB_DEVICE_REMOTE_WAKEUP)
2332 goto stall; /* Nothing else handled */
2333
2334 /* Tell board about event */
2335 if (req == USB_REQ_CLEAR_FEATURE)
2336 udc->dev_status &=
2337 ~(1 << USB_DEVICE_REMOTE_WAKEUP);
2338 else
2339 udc->dev_status |=
2340 (1 << USB_DEVICE_REMOTE_WAKEUP);
2341 uda_remwkp_cgh(udc);
2342 goto zlp_send;
2343
2344 case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2345 tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
2346 if ((wValue != USB_ENDPOINT_HALT) ||
2347 (tmp >= NUM_ENDPOINTS))
2348 break;
2349
2350 /* Find hardware endpoint from logical endpoint */
2351 ep = &udc->ep[tmp];
2352 tmp = ep->hwep_num;
2353 if (tmp == 0)
2354 break;
2355
2356 if (req == USB_REQ_SET_FEATURE)
2357 udc_stall_hwep(udc, tmp);
2358 else if (!ep->wedge)
2359 udc_clrstall_hwep(udc, tmp);
2360
2361 goto zlp_send;
2362
2363 default:
2364 break;
2365 }
2366
2367
2368 case USB_REQ_SET_ADDRESS:
2369 if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
2370 udc_set_address(udc, wValue);
2371 goto zlp_send;
2372 }
2373 break;
2374
2375 case USB_REQ_GET_STATUS:
2376 udc_get_status(udc, reqtype, wIndex);
2377 return;
2378
2379 default:
2380 break; /* Let the gadget driver's setup() handle it instead */
2381 }
2382
2383 if (likely(udc->driver)) {
2384 /* device-2-host (IN) or no data setup command, process
2385 * immediately */
2386 spin_unlock(&udc->lock);
2387 i = udc->driver->setup(&udc->gadget, &ctrlpkt);
2388
2389 spin_lock(&udc->lock);
2390 if (req == USB_REQ_SET_CONFIGURATION) {
2391 /* Configuration is set after endpoints are realized */
2392 if (wValue) {
2393 /* Set configuration */
2394 udc_set_device_configured(udc);
2395
2396 udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
2397 DAT_WR_BYTE(AP_CLK |
2398 INAK_BI | INAK_II));
2399 } else {
2400 /* Clear configuration */
2401 udc_set_device_unconfigured(udc);
2402
2403 /* Disable NAK interrupts */
2404 udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
2405 DAT_WR_BYTE(AP_CLK));
2406 }
2407 }
2408
2409 if (i < 0) {
2410 /* setup processing failed, force stall */
2411 dev_err(udc->dev,
2412 "req %02x.%02x protocol STALL; stat %d\n",
2413 reqtype, req, i);
2414 udc->ep0state = WAIT_FOR_SETUP;
2415 goto stall;
2416 }
2417 }
2418
2419 if (!ep0->is_in)
2420 udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */
2421
2422 return;
2423
2424 stall:
2425 udc_stall_hwep(udc, EP_IN);
2426 return;
2427
2428 zlp_send:
2429 udc_ep0_send_zlp(udc);
2430 return;
2431 }
2432
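/*
 * EP0 state machine summary: ep0state starts in WAIT_FOR_SETUP, moves to
 * DATA_IN or DATA_OUT once lpc32xx_ep_queue() sees a request on endpoint 0,
 * and returns to WAIT_FOR_SETUP when the transfer completes, a new SETUP
 * packet arrives, or a stall/abort occurs.
 */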
2433 /* IN endpoint 0 transfer */
2434 static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
2435 {
2436 struct lpc32xx_ep *ep0 = &udc->ep[0];
2437 u32 epstatus;
2438
2439 /* Clear EP interrupt */
2440 epstatus = udc_clearep_getsts(udc, EP_IN);
2441
2442 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
2443 ep0->totalints++;
2444 #endif
2445
2446 /* Stalled? Clear stall and reset buffers */
2447 if (epstatus & EP_SEL_ST) {
2448 udc_clrstall_hwep(udc, EP_IN);
2449 nuke(ep0, -ECONNABORTED);
2450 udc->ep0state = WAIT_FOR_SETUP;
2451 return;
2452 }
2453
2454 /* Is a buffer available? */
2455 if (!(epstatus & EP_SEL_F)) {
2456 /* Handle based on current state */
2457 if (udc->ep0state == DATA_IN)
2458 udc_ep0_in_req(udc);
2459 else {
2460 /* Unknown state for EP0 or end of DATA IN phase */
2461 nuke(ep0, -ECONNABORTED);
2462 udc->ep0state = WAIT_FOR_SETUP;
2463 }
2464 }
2465 }
2466
2467 /* OUT endpoint 0 transfer */
2468 static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
2469 {
2470 struct lpc32xx_ep *ep0 = &udc->ep[0];
2471 u32 epstatus;
2472
2473 /* Clear EP interrupt */
2474 epstatus = udc_clearep_getsts(udc, EP_OUT);
2475
2476
2477 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
2478 ep0->totalints++;
2479 #endif
2480
2481 /* Stalled? */
2482 if (epstatus & EP_SEL_ST) {
2483 udc_clrstall_hwep(udc, EP_OUT);
2484 nuke(ep0, -ECONNABORTED);
2485 udc->ep0state = WAIT_FOR_SETUP;
2486 return;
2487 }
2488
2489 /* A NAK may occur if a packet couldn't be received yet */
2490 if (epstatus & EP_SEL_EPN)
2491 return;
2492 /* Setup packet incoming? */
2493 if (epstatus & EP_SEL_STP) {
2494 nuke(ep0, 0);
2495 udc->ep0state = WAIT_FOR_SETUP;
2496 }
2497
2498 /* Data available? */
2499 if (epstatus & EP_SEL_F)
2500 /* Handle based on current state */
2501 switch (udc->ep0state) {
2502 case WAIT_FOR_SETUP:
2503 udc_handle_ep0_setup(udc);
2504 break;
2505
2506 case DATA_OUT:
2507 udc_ep0_out_req(udc);
2508 break;
2509
2510 default:
2511 /* Unknown state for EP0 */
2512 nuke(ep0, -ECONNABORTED);
2513 udc->ep0state = WAIT_FOR_SETUP;
2514 }
2515 }
2516
2517 /* Must be called without lock */
2518 static int lpc32xx_get_frame(struct usb_gadget *gadget)
2519 {
2520 int frame;
2521 unsigned long flags;
2522 struct lpc32xx_udc *udc = to_udc(gadget);
2523
2524 if (!udc->clocked)
2525 return -EINVAL;
2526
2527 spin_lock_irqsave(&udc->lock, flags);
2528
2529 frame = (int) udc_get_current_frame(udc);
2530
2531 spin_unlock_irqrestore(&udc->lock, flags);
2532
2533 return frame;
2534 }
2535
2536 static int lpc32xx_wakeup(struct usb_gadget *gadget)
2537 {
2538 return -ENOTSUPP;
2539 }
2540
2541 static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
2542 {
2543 struct lpc32xx_udc *udc = to_udc(gadget);
2544
2545 /* Always self-powered */
2546 udc->selfpowered = (is_on != 0);
2547
2548 return 0;
2549 }
2550
2551 /*
2552 * vbus is here! turn everything on that's ready
2553 * Must be called without lock
2554 */
2555 static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
2556 {
2557 unsigned long flags;
2558 struct lpc32xx_udc *udc = to_udc(gadget);
2559
2560 spin_lock_irqsave(&udc->lock, flags);
2561
2562 /* Doesn't need lock */
2563 if (udc->driver) {
2564 udc_clk_set(udc, 1);
2565 udc_enable(udc);
2566 pullup(udc, is_active);
2567 } else {
2568 stop_activity(udc);
2569 pullup(udc, 0);
2570
2571 spin_unlock_irqrestore(&udc->lock, flags);
2572 /*
2573 * Wait for all the endpoints to disable,
2574 * before disabling clocks. Don't wait if
2575 * endpoints are not enabled.
2576 */
2577 if (atomic_read(&udc->enabled_ep_cnt))
2578 wait_event_interruptible(udc->ep_disable_wait_queue,
2579 (atomic_read(&udc->enabled_ep_cnt) == 0));
2580
2581 spin_lock_irqsave(&udc->lock, flags);
2582
2583 udc_clk_set(udc, 0);
2584 }
2585
2586 spin_unlock_irqrestore(&udc->lock, flags);
2587
2588 return 0;
2589 }
2590
2591 /* Can be called with or without lock */
2592 static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
2593 {
2594 struct lpc32xx_udc *udc = to_udc(gadget);
2595
2596 /* Doesn't need lock */
2597 pullup(udc, is_on);
2598
2599 return 0;
2600 }
2601
2602 static int lpc32xx_start(struct usb_gadget_driver *driver,
2603 int (*bind)(struct usb_gadget *));
2604 static int lpc32xx_stop(struct usb_gadget_driver *driver);
2605
2606 static const struct usb_gadget_ops lpc32xx_udc_ops = {
2607 .get_frame = lpc32xx_get_frame,
2608 .wakeup = lpc32xx_wakeup,
2609 .set_selfpowered = lpc32xx_set_selfpowered,
2610 .vbus_session = lpc32xx_vbus_session,
2611 .pullup = lpc32xx_pullup,
2612 .start = lpc32xx_start,
2613 .stop = lpc32xx_stop,
2614 };
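/* These ops are invoked by the gadget core: start()/stop() run when a
 * gadget driver registers or unregisters (usb_gadget_probe_driver() and
 * friends), and vbus_session() is driven from vbus_work() below when the
 * ISP1301 reports a cable change. */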
2615
2616 static void nop_release(struct device *dev)
2617 {
2618 /* nothing to free */
2619 }
2620
2621 static struct lpc32xx_udc controller = {
2622 .gadget = {
2623 .ops = &lpc32xx_udc_ops,
2624 .ep0 = &controller.ep[0].ep,
2625 .name = driver_name,
2626 .dev = {
2627 .init_name = "gadget",
2628 .release = nop_release,
2629 }
2630 },
2631 .ep[0] = {
2632 .ep = {
2633 .name = "ep0",
2634 .ops = &lpc32xx_ep_ops,
2635 },
2636 .udc = &controller,
2637 .maxpacket = 64,
2638 .hwep_num_base = 0,
2639 .hwep_num = 0, /* Can be 0 or 1, has special handling */
2640 .lep = 0,
2641 .eptype = EP_CTL_TYPE,
2642 },
2643 .ep[1] = {
2644 .ep = {
2645 .name = "ep1-int",
2646 .ops = &lpc32xx_ep_ops,
2647 },
2648 .udc = &controller,
2649 .maxpacket = 64,
2650 .hwep_num_base = 2,
2651 .hwep_num = 0, /* 2 or 3, will be set later */
2652 .lep = 1,
2653 .eptype = EP_INT_TYPE,
2654 },
2655 .ep[2] = {
2656 .ep = {
2657 .name = "ep2-bulk",
2658 .ops = &lpc32xx_ep_ops,
2659 },
2660 .udc = &controller,
2661 .maxpacket = 64,
2662 .hwep_num_base = 4,
2663 .hwep_num = 0, /* 4 or 5, will be set later */
2664 .lep = 2,
2665 .eptype = EP_BLK_TYPE,
2666 },
2667 .ep[3] = {
2668 .ep = {
2669 .name = "ep3-iso",
2670 .ops = &lpc32xx_ep_ops,
2671 },
2672 .udc = &controller,
2673 .maxpacket = 1023,
2674 .hwep_num_base = 6,
2675 .hwep_num = 0, /* 6 or 7, will be set later */
2676 .lep = 3,
2677 .eptype = EP_ISO_TYPE,
2678 },
2679 .ep[4] = {
2680 .ep = {
2681 .name = "ep4-int",
2682 .ops = &lpc32xx_ep_ops,
2683 },
2684 .udc = &controller,
2685 .maxpacket = 64,
2686 .hwep_num_base = 8,
2687 .hwep_num = 0, /* 8 or 9, will be set later */
2688 .lep = 4,
2689 .eptype = EP_INT_TYPE,
2690 },
2691 .ep[5] = {
2692 .ep = {
2693 .name = "ep5-bulk",
2694 .ops = &lpc32xx_ep_ops,
2695 },
2696 .udc = &controller,
2697 .maxpacket = 64,
2698 .hwep_num_base = 10,
2699 .hwep_num = 0, /* 10 or 11, will be set later */
2700 .lep = 5,
2701 .eptype = EP_BLK_TYPE,
2702 },
2703 .ep[6] = {
2704 .ep = {
2705 .name = "ep6-iso",
2706 .ops = &lpc32xx_ep_ops,
2707 },
2708 .udc = &controller,
2709 .maxpacket = 1023,
2710 .hwep_num_base = 12,
2711 .hwep_num = 0, /* 12 or 13, will be set later */
2712 .lep = 6,
2713 .eptype = EP_ISO_TYPE,
2714 },
2715 .ep[7] = {
2716 .ep = {
2717 .name = "ep7-int",
2718 .ops = &lpc32xx_ep_ops,
2719 },
2720 .udc = &controller,
2721 .maxpacket = 64,
2722 .hwep_num_base = 14,
2723 .hwep_num = 0,
2724 .lep = 7,
2725 .eptype = EP_INT_TYPE,
2726 },
2727 .ep[8] = {
2728 .ep = {
2729 .name = "ep8-bulk",
2730 .ops = &lpc32xx_ep_ops,
2731 },
2732 .udc = &controller,
2733 .maxpacket = 64,
2734 .hwep_num_base = 16,
2735 .hwep_num = 0,
2736 .lep = 8,
2737 .eptype = EP_BLK_TYPE,
2738 },
2739 .ep[9] = {
2740 .ep = {
2741 .name = "ep9-iso",
2742 .ops = &lpc32xx_ep_ops,
2743 },
2744 .udc = &controller,
2745 .maxpacket = 1023,
2746 .hwep_num_base = 18,
2747 .hwep_num = 0,
2748 .lep = 9,
2749 .eptype = EP_ISO_TYPE,
2750 },
2751 .ep[10] = {
2752 .ep = {
2753 .name = "ep10-int",
2754 .ops = &lpc32xx_ep_ops,
2755 },
2756 .udc = &controller,
2757 .maxpacket = 64,
2758 .hwep_num_base = 20,
2759 .hwep_num = 0,
2760 .lep = 10,
2761 .eptype = EP_INT_TYPE,
2762 },
2763 .ep[11] = {
2764 .ep = {
2765 .name = "ep11-bulk",
2766 .ops = &lpc32xx_ep_ops,
2767 },
2768 .udc = &controller,
2769 .maxpacket = 64,
2770 .hwep_num_base = 22,
2771 .hwep_num = 0,
2772 .lep = 11,
2773 .eptype = EP_BLK_TYPE,
2774 },
2775 .ep[12] = {
2776 .ep = {
2777 .name = "ep12-iso",
2778 .ops = &lpc32xx_ep_ops,
2779 },
2780 .udc = &controller,
2781 .maxpacket = 1023,
2782 .hwep_num_base = 24,
2783 .hwep_num = 0,
2784 .lep = 12,
2785 .eptype = EP_ISO_TYPE,
2786 },
2787 .ep[13] = {
2788 .ep = {
2789 .name = "ep13-int",
2790 .ops = &lpc32xx_ep_ops,
2791 },
2792 .udc = &controller,
2793 .maxpacket = 64,
2794 .hwep_num_base = 26,
2795 .hwep_num = 0,
2796 .lep = 13,
2797 .eptype = EP_INT_TYPE,
2798 },
2799 .ep[14] = {
2800 .ep = {
2801 .name = "ep14-bulk",
2802 .ops = &lpc32xx_ep_ops,
2803 },
2804 .udc = &controller,
2805 .maxpacket = 64,
2806 .hwep_num_base = 28,
2807 .hwep_num = 0,
2808 .lep = 14,
2809 .eptype = EP_BLK_TYPE,
2810 },
2811 .ep[15] = {
2812 .ep = {
2813 .name = "ep15-bulk",
2814 .ops = &lpc32xx_ep_ops,
2815 },
2816 .udc = &controller,
2817 .maxpacket = 1023,
2818 .hwep_num_base = 30,
2819 .hwep_num = 0,
2820 .lep = 15,
2821 .eptype = EP_BLK_TYPE,
2822 },
2823 };
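/* Endpoint table layout: logical endpoint N uses the hardware endpoint pair
 * (hwep_num_base, hwep_num_base + 1) for OUT and IN respectively; the final
 * hwep_num is resolved in lpc32xx_ep_enable() from the descriptor's
 * direction bit. */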
2824
2825 /* ISO and status interrupts */
2826 static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
2827 {
2828 u32 tmp, devstat;
2829 struct lpc32xx_udc *udc = _udc;
2830
2831 spin_lock(&udc->lock);
2832
2833 /* Read the device status register */
2834 devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
2835
2836 devstat &= ~USBD_EP_FAST;
2837 writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
2838 devstat = devstat & udc->enabled_devints;
2839
2840 /* Device specific handling needed? */
2841 if (devstat & USBD_DEV_STAT)
2842 udc_handle_dev(udc);
2843
2844 /* Start of frame? (devstat & FRAME_INT):
2845 * The frame interrupt isn't really needed for ISO support,
2846 * as the driver will queue the necessary packets */
2847
2848 /* Error? */
2849 if (devstat & ERR_INT) {
2850 /* All types of errors, from cable removal during transfer to
2851 * misc protocol and bit errors. These are mostly informational,
2852 * as the USB hardware will work around them. If these errors
2853 * happen a lot, something is wrong. */
2854 udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
2855 tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
2856 dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
2857 }
2858
2859 spin_unlock(&udc->lock);
2860
2861 return IRQ_HANDLED;
2862 }
2863
2864 /* EP interrupts */
2865 static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
2866 {
2867 u32 tmp;
2868 struct lpc32xx_udc *udc = _udc;
2869
2870 spin_lock(&udc->lock);
2871
2872 /* Read the device status register */
2873 writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr));
2874
2875 /* Endpoints */
2876 tmp = readl(USBD_EPINTST(udc->udp_baseaddr));
2877
2878 /* Special handling for EP0 */
2879 if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
2880 /* Handle EP0 IN */
2881 if (tmp & (EP_MASK_SEL(0, EP_IN)))
2882 udc_handle_ep0_in(udc);
2883
2884 /* Handle EP0 OUT */
2885 if (tmp & (EP_MASK_SEL(0, EP_OUT)))
2886 udc_handle_ep0_out(udc);
2887 }
2888
2889 /* All other EPs */
2890 if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
2891 int i;
2892
2893 /* Handle other EP interrupts */
2894 for (i = 1; i < NUM_ENDPOINTS; i++) {
2895 if (tmp & (1 << udc->ep[i].hwep_num))
2896 udc_handle_eps(udc, &udc->ep[i]);
2897 }
2898 }
2899
2900 spin_unlock(&udc->lock);
2901
2902 return IRQ_HANDLED;
2903 }
2904
2905 static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
2906 {
2907 struct lpc32xx_udc *udc = _udc;
2908
2909 int i;
2910 u32 tmp;
2911
2912 spin_lock(&udc->lock);
2913
2914 /* Handle EP DMA EOT interrupts */
2915 tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) |
2916 (readl(USBD_EPDMAST(udc->udp_baseaddr)) &
2917 readl(USBD_NDDRTINTST(udc->udp_baseaddr))) |
2918 readl(USBD_SYSERRTINTST(udc->udp_baseaddr));
2919 for (i = 1; i < NUM_ENDPOINTS; i++) {
2920 if (tmp & (1 << udc->ep[i].hwep_num))
2921 udc_handle_dma_ep(udc, &udc->ep[i]);
2922 }
2923
2924 spin_unlock(&udc->lock);
2925
2926 return IRQ_HANDLED;
2927 }
2928
2929 /*
2930 *
2931 * VBUS detection, pullup handler, and Gadget cable state notification
2932 *
2933 */
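/* VBUS changes are detected by the ISP1301 transceiver, which is reached
 * over I2C. i2c_smbus_* transfers can sleep, so the hard IRQ handler below
 * only masks the interrupt and defers the real work to this workqueue
 * handler. */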
2934 static void vbus_work(struct work_struct *work)
2935 {
2936 u8 value;
2937 struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
2938 vbus_job);
2939
2940 if (udc->enabled != 0) {
2941 /* Discharge VBUS real quick */
2942 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2943 ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
2944
2945 /* Give VBUS some time (100 ms) to discharge */
2946 msleep(100);
2947
2948 /* Disable VBUS discharge resistor */
2949 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2950 ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
2951 OTG1_VBUS_DISCHRG);
2952
2953 /* Clear interrupt */
2954 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2955 ISP1301_I2C_INTERRUPT_LATCH |
2956 ISP1301_I2C_REG_CLEAR_ADDR, ~0);
2957
2958 /* Get the VBUS status from the transceiver */
2959 value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
2960 ISP1301_I2C_OTG_CONTROL_2);
2961
2962 /* VBUS on or off? */
2963 if (value & OTG_B_SESS_VLD)
2964 udc->vbus = 1;
2965 else
2966 udc->vbus = 0;
2967
2968 /* VBUS changed? */
2969 if (udc->last_vbus != udc->vbus) {
2970 udc->last_vbus = udc->vbus;
2971 lpc32xx_vbus_session(&udc->gadget, udc->vbus);
2972 }
2973 }
2974
2975 /* Re-enable after completion */
2976 enable_irq(udc->udp_irq[IRQ_USB_ATX]);
2977 }
2978
2979 static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
2980 {
2981 struct lpc32xx_udc *udc = _udc;
2982
2983 /* Defer handling of VBUS IRQ to work queue */
2984 disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]);
2985 schedule_work(&udc->vbus_job);
2986
2987 return IRQ_HANDLED;
2988 }
2989
2990 static int lpc32xx_start(struct usb_gadget_driver *driver,
2991 int (*bind)(struct usb_gadget *))
2992 {
2993 struct lpc32xx_udc *udc = &controller;
2994 int retval, i;
2995
2996 if (!driver || driver->max_speed < USB_SPEED_FULL ||
2997 !bind || !driver->setup) {
2998 dev_err(udc->dev, "bad parameter.\n");
2999 return -EINVAL;
3000 }
3001
3002 if (udc->driver) {
3003 dev_err(udc->dev, "UDC already has a gadget driver\n");
3004 return -EBUSY;
3005 }
3006
3007 udc->driver = driver;
3008 udc->gadget.dev.driver = &driver->driver;
3009 udc->gadget.dev.of_node = udc->dev->of_node;
3010 udc->enabled = 1;
3011 udc->selfpowered = 1;
3012 udc->vbus = 0;
3013
3014 retval = bind(&udc->gadget);
3015 if (retval) {
3016 dev_err(udc->dev, "bind() returned %d\n", retval);
3017 udc->enabled = 0;
3018 udc->selfpowered = 0;
3019 udc->driver = NULL;
3020 udc->gadget.dev.driver = NULL;
3021 return retval;
3022 }
3023
3024 dev_dbg(udc->dev, "bound to %s\n", driver->driver.name);
3025
3026 /* Force VBUS process once to check for cable insertion */
3027 udc->last_vbus = udc->vbus = 0;
3028 schedule_work(&udc->vbus_job);
3029
3030 /* Do not re-enable ATX IRQ (3) */
3031 for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++)
3032 enable_irq(udc->udp_irq[i]);
3033
3034 return 0;
3035 }
3036
3037 static int lpc32xx_stop(struct usb_gadget_driver *driver)
3038 {
3039 int i;
3040 struct lpc32xx_udc *udc = &controller;
3041
3042 if (!driver || driver != udc->driver || !driver->unbind)
3043 return -EINVAL;
3044
3045 /* Disable USB pullup */
3046 isp1301_pullup_enable(udc, 0, 1);
3047
3048 for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
3049 disable_irq(udc->udp_irq[i]);
3050
3051 if (udc->clocked) {
3052
3053 spin_lock(&udc->lock);
3054 stop_activity(udc);
3055 spin_unlock(&udc->lock);
3056
3057 /*
3058 * Wait for all the endpoints to disable,
3059 * before disabling clocks. Don't wait if
3060 * endpoints are not enabled.
3061 */
3062 if (atomic_read(&udc->enabled_ep_cnt))
3063 wait_event_interruptible(udc->ep_disable_wait_queue,
3064 (atomic_read(&udc->enabled_ep_cnt) == 0));
3065
3066 spin_lock(&udc->lock);
3067 udc_clk_set(udc, 0);
3068 spin_unlock(&udc->lock);
3069 }
3070
3071 udc->enabled = 0;
3072 pullup(udc, 0);
3073
3074 driver->unbind(&udc->gadget);
3075 udc->gadget.dev.driver = NULL;
3076 udc->driver = NULL;
3077
3078 dev_dbg(udc->dev, "unbound from %s\n", driver->driver.name);
3079 return 0;
3080 }
3081
3082 static void lpc32xx_udc_shutdown(struct platform_device *dev)
3083 {
3084 /* Force disconnect on reboot */
3085 struct lpc32xx_udc *udc = &controller;
3086
3087 pullup(udc, 0);
3088 }
3089
3090 /*
3091 * Callbacks to be overridden by options passed via OF (TODO)
3092 */
3093
3094 static void lpc32xx_usbd_conn_chg(int conn)
3095 {
3096 /* Do nothing, it might be nice to enable an LED
3097 * based on conn state being !0 */
3098 }
3099
3100 static void lpc32xx_usbd_susp_chg(int susp)
3101 {
3102 /* Device suspend if susp != 0 */
3103 }
3104
3105 static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
3106 {
3107 /* Enable or disable USB remote wakeup */
3108 }
3109
3110 struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
3111 .vbus_drv_pol = 0,
3112 .conn_chgb = &lpc32xx_usbd_conn_chg,
3113 .susp_chgb = &lpc32xx_usbd_susp_chg,
3114 .rmwk_chgb = &lpc32xx_rmwkup_chg,
3115 };
3116
3117
3118 static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;
3119
3120 static int __init lpc32xx_udc_probe(struct platform_device *pdev)
3121 {
3122 struct device *dev = &pdev->dev;
3123 struct lpc32xx_udc *udc = &controller;
3124 int retval, i;
3125 struct resource *res;
3126 dma_addr_t dma_handle;
3127 struct device_node *isp1301_node;
3128
3129 /* init software state */
3130 udc->gadget.dev.parent = dev;
3131 udc->pdev = pdev;
3132 udc->dev = &pdev->dev;
3133 udc->enabled = 0;
3134
3135 if (pdev->dev.of_node) {
3136 isp1301_node = of_parse_phandle(pdev->dev.of_node,
3137 "transceiver", 0);
3138 } else {
3139 isp1301_node = NULL;
3140 }
3141
3142 udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
3143 if (!udc->isp1301_i2c_client)
3144 return -EPROBE_DEFER;
3145
3146 dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
3147 udc->isp1301_i2c_client->addr);
3148
3149 pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
3150 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
3151
3152 udc->board = &lpc32xx_usbddata;
3153
3154 /*
3155 * Resources are mapped as follows:
3156 * IORESOURCE_MEM, base address and size of USB space
3157 * IORESOURCE_IRQ, USB device low priority interrupt number
3158 * IORESOURCE_IRQ, USB device high priority interrupt number
3159 * IORESOURCE_IRQ, USB device interrupt number
3160 * IORESOURCE_IRQ, USB transceiver interrupt number
3161 */
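/*
 * Illustrative device tree fragment matching the resource layout above
 * (property values are board-specific placeholders, not taken from this
 * driver):
 *
 *	usbd@... {
 *		compatible = "nxp,lpc3220-udc";
 *		reg = <...>;
 *		interrupts = <...>, <...>, <...>, <...>;	LP, HP, DEVDMA, ATX
 *		transceiver = <&isp1301>;
 *	};
 */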
3162 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3163 if (!res)
3164 return -ENXIO;
3165
3166 spin_lock_init(&udc->lock);
3167
3168 /* Get IRQs */
3169 for (i = 0; i < 4; i++) {
3170 udc->udp_irq[i] = platform_get_irq(pdev, i);
3171 if (udc->udp_irq[i] < 0) {
3172 dev_err(udc->dev,
3173 "irq resource %d not available!\n", i);
3174 return udc->udp_irq[i];
3175 }
3176 }
3177
3178 udc->io_p_start = res->start;
3179 udc->io_p_size = resource_size(res);
3180 if (!request_mem_region(udc->io_p_start, udc->io_p_size, driver_name)) {
3181 dev_err(udc->dev, "someone's using UDC memory\n");
3182 return -EBUSY;
3183 }
3184
3185 udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size);
3186 if (!udc->udp_baseaddr) {
3187 retval = -ENOMEM;
3188 dev_err(udc->dev, "IO map failure\n");
3189 goto io_map_fail;
3190 }
3191
3192 /* Enable AHB slave USB clock, needed for further USB clock control */
3193 writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);
3194
3195 /* Get required clocks */
3196 udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
3197 if (IS_ERR(udc->usb_pll_clk)) {
3198 dev_err(udc->dev, "failed to acquire USB PLL\n");
3199 retval = PTR_ERR(udc->usb_pll_clk);
3200 goto pll_get_fail;
3201 }
3202 udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
3203 if (IS_ERR(udc->usb_slv_clk)) {
3204 dev_err(udc->dev, "failed to acquire USB device clock\n");
3205 retval = PTR_ERR(udc->usb_slv_clk);
3206 goto usb_clk_get_fail;
3207 }
3208 udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
3209 if (IS_ERR(udc->usb_otg_clk)) {
3210 dev_err(udc->dev, "failed to acquire USB otg clock\n");
3211 retval = PTR_ERR(udc->usb_otg_clk);
3212 goto usb_otg_clk_get_fail;
3213 }
3214
3215 /* Setup PLL clock to 48MHz */
3216 retval = clk_enable(udc->usb_pll_clk);
3217 if (retval < 0) {
3218 dev_err(udc->dev, "failed to start USB PLL\n");
3219 goto pll_enable_fail;
3220 }
3221
3222 retval = clk_set_rate(udc->usb_pll_clk, 48000);
3223 if (retval < 0) {
3224 dev_err(udc->dev, "failed to set USB clock rate\n");
3225 goto pll_set_fail;
3226 }
3227
3228 writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
3229
3230 /* Enable USB device clock */
3231 retval = clk_enable(udc->usb_slv_clk);
3232 if (retval < 0) {
3233 dev_err(udc->dev, "failed to start USB device clock\n");
3234 goto usb_clk_enable_fail;
3235 }
3236
3237 /* Enable USB OTG clock */
3238 retval = clk_enable(udc->usb_otg_clk);
3239 if (retval < 0) {
3240 dev_err(udc->dev, "failed to start USB otg clock\n");
3241 goto usb_otg_clk_enable_fail;
3242 }
3243
3244 /* Setup deferred workqueue data */
3245 udc->poweron = udc->pullup = 0;
3246 INIT_WORK(&udc->pullup_job, pullup_work);
3247 INIT_WORK(&udc->vbus_job, vbus_work);
3248 #ifdef CONFIG_PM
3249 INIT_WORK(&udc->power_job, power_work);
3250 #endif
3251
3252 /* All clocks are now on */
3253 udc->clocked = 1;
3254
3255 isp1301_udc_configure(udc);
3256 /* Allocate memory for the UDCA */
3257 udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3258 &dma_handle,
3259 (GFP_KERNEL | GFP_DMA));
3260 if (!udc->udca_v_base) {
3261 dev_err(udc->dev, "error getting UDCA region\n");
3262 retval = -ENOMEM;
3263 goto i2c_fail;
3264 }
3265 udc->udca_p_base = dma_handle;
3266 dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
3267 UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);
3268
3269 /* Setup the DD DMA memory pool */
3270 udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
3271 sizeof(struct lpc32xx_usbd_dd_gad),
3272 sizeof(u32), 0);
3273 if (!udc->dd_cache) {
3274 dev_err(udc->dev, "error getting DD DMA region\n");
3275 retval = -ENOMEM;
3276 goto dma_alloc_fail;
3277 }
3278
3279 /* Clear USB peripheral and initialize gadget endpoints */
3280 udc_disable(udc);
3281 udc_reinit(udc);
3282
3283 retval = device_register(&udc->gadget.dev);
3284 if (retval < 0) {
3285 dev_err(udc->dev, "Device registration failure\n");
3286 goto dev_register_fail;
3287 }
3288
3289 /* Request IRQs - the low and high priority USB device IRQs have their
3290 * own handlers, and the DMA interrupt is handled separately */
3291 retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq,
3292 0, "udc_lp", udc);
3293 if (retval < 0) {
3294 dev_err(udc->dev, "LP request irq %d failed\n",
3295 udc->udp_irq[IRQ_USB_LP]);
3296 goto irq_lp_fail;
3297 }
3298 retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq,
3299 0, "udc_hp", udc);
3300 if (retval < 0) {
3301 dev_err(udc->dev, "HP request irq %d failed\n",
3302 udc->udp_irq[IRQ_USB_HP]);
3303 goto irq_hp_fail;
3304 }
3305
3306 retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA],
3307 lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
3308 if (retval < 0) {
3309 dev_err(udc->dev, "DEV request irq %d failed\n",
3310 udc->udp_irq[IRQ_USB_DEVDMA]);
3311 goto irq_dev_fail;
3312 }
3313
3314 /* The transceiver interrupt is used for VBUS detection and will
3315 * kick off the VBUS handler function */
3316 retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq,
3317 0, "udc_otg", udc);
3318 if (retval < 0) {
3319 dev_err(udc->dev, "VBUS request irq %d failed\n",
3320 udc->udp_irq[IRQ_USB_ATX]);
3321 goto irq_xcvr_fail;
3322 }
3323
3324 /* Initialize wait queue */
3325 init_waitqueue_head(&udc->ep_disable_wait_queue);
3326 atomic_set(&udc->enabled_ep_cnt, 0);
3327
3328 /* Keep all IRQs disabled until a gadget driver starts up */
3329 for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
3330 disable_irq(udc->udp_irq[i]);
3331
3332 retval = usb_add_gadget_udc(dev, &udc->gadget);
3333 if (retval < 0)
3334 goto add_gadget_fail;
3335
3336 dev_set_drvdata(dev, udc);
3337 device_init_wakeup(dev, 1);
3338 create_debug_file(udc);
3339
3340 /* Disable clocks for now */
3341 udc_clk_set(udc, 0);
3342
3343 dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
3344 return 0;
3345
3346 add_gadget_fail:
3347 free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
3348 irq_xcvr_fail:
3349 free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
3350 irq_dev_fail:
3351 free_irq(udc->udp_irq[IRQ_USB_HP], udc);
3352 irq_hp_fail:
3353 free_irq(udc->udp_irq[IRQ_USB_LP], udc);
3354 irq_lp_fail:
3355 device_unregister(&udc->gadget.dev);
3356 dev_register_fail:
3357 dma_pool_destroy(udc->dd_cache);
3358 dma_alloc_fail:
3359 dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3360 udc->udca_v_base, udc->udca_p_base);
3361 i2c_fail:
3362 clk_disable(udc->usb_otg_clk);
3363 usb_otg_clk_enable_fail:
3364 clk_disable(udc->usb_slv_clk);
3365 usb_clk_enable_fail:
3366 pll_set_fail:
3367 clk_disable(udc->usb_pll_clk);
3368 pll_enable_fail:
3369 clk_put(udc->usb_otg_clk);
3370 usb_otg_clk_get_fail:
3371 clk_put(udc->usb_slv_clk);
3372 usb_clk_get_fail:
3373 clk_put(udc->usb_pll_clk);
3374 pll_get_fail:
3375 iounmap(udc->udp_baseaddr);
3376 io_map_fail:
3377 release_mem_region(udc->io_p_start, udc->io_p_size);
3378 dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
3379
3380 return retval;
3381 }
3382
3383 static int __devexit lpc32xx_udc_remove(struct platform_device *pdev)
3384 {
3385 struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3386
3387 usb_del_gadget_udc(&udc->gadget);
3388 if (udc->driver)
3389 return -EBUSY;
3390
3391 udc_clk_set(udc, 1);
3392 udc_disable(udc);
3393 pullup(udc, 0);
3394
3395 free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
3396
3397 device_init_wakeup(&pdev->dev, 0);
3398 remove_debug_file(udc);
3399
3400 dma_pool_destroy(udc->dd_cache);
3401 dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3402 udc->udca_v_base, udc->udca_p_base);
3403 free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
3404 free_irq(udc->udp_irq[IRQ_USB_HP], udc);
3405 free_irq(udc->udp_irq[IRQ_USB_LP], udc);
3406
3407 device_unregister(&udc->gadget.dev);
3408
3409 clk_disable(udc->usb_otg_clk);
3410 clk_put(udc->usb_otg_clk);
3411 clk_disable(udc->usb_slv_clk);
3412 clk_put(udc->usb_slv_clk);
3413 clk_disable(udc->usb_pll_clk);
3414 clk_put(udc->usb_pll_clk);
3415 iounmap(udc->udp_baseaddr);
3416 release_mem_region(udc->io_p_start, udc->io_p_size);
3417
3418 return 0;
3419 }
3420
3421 #ifdef CONFIG_PM
3422 static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
3423 {
3424 struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3425
3426 if (udc->clocked) {
3427 /* Power down ISP */
3428 udc->poweron = 0;
3429 isp1301_set_powerstate(udc, 0);
3430
3431 /* Disable clocking */
3432 udc_clk_set(udc, 0);
3433
3434 /* Keep clock flag on, so we know to re-enable clocks
3435 * on resume */
3436 udc->clocked = 1;
3437
3438 /* Kill global USB clock */
3439 clk_disable(udc->usb_slv_clk);
3440 }
3441
3442 return 0;
3443 }
3444
3445 static int lpc32xx_udc_resume(struct platform_device *pdev)
3446 {
3447 struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3448
3449 if (udc->clocked) {
3450 /* Enable global USB clock */
3451 clk_enable(udc->usb_slv_clk);
3452
3453 /* Enable clocking */
3454 udc_clk_set(udc, 1);
3455
3456 /* ISP back to normal power mode */
3457 udc->poweron = 1;
3458 isp1301_set_powerstate(udc, 1);
3459 }
3460
3461 return 0;
3462 }
3463 #else
3464 #define lpc32xx_udc_suspend NULL
3465 #define lpc32xx_udc_resume NULL
3466 #endif
3467
3468 #ifdef CONFIG_OF
3469 static struct of_device_id lpc32xx_udc_of_match[] = {
3470 { .compatible = "nxp,lpc3220-udc", },
3471 { },
3472 };
3473 MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
3474 #endif
3475
3476 static struct platform_driver lpc32xx_udc_driver = {
3477 .remove = __devexit_p(lpc32xx_udc_remove),
3478 .shutdown = lpc32xx_udc_shutdown,
3479 .suspend = lpc32xx_udc_suspend,
3480 .resume = lpc32xx_udc_resume,
3481 .driver = {
3482 .name = (char *) driver_name,
3483 .owner = THIS_MODULE,
3484 .of_match_table = of_match_ptr(lpc32xx_udc_of_match),
3485 },
3486 };
3487
3488 static int __init udc_init_module(void)
3489 {
3490 return platform_driver_probe(&lpc32xx_udc_driver, lpc32xx_udc_probe);
3491 }
3492 module_init(udc_init_module);
3493
3494 static void __exit udc_exit_module(void)
3495 {
3496 platform_driver_unregister(&lpc32xx_udc_driver);
3497 }
3498 module_exit(udc_exit_module);
3499
3500 MODULE_DESCRIPTION("LPC32XX udc driver");
3501 MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
3502 MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
3503 MODULE_LICENSE("GPL");
3504 MODULE_ALIAS("platform:lpc32xx_udc");