/* drivers/misc/mediatek/xhci_test/xhci.c (PULS_20160108 import) */
1 /*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23 #include <linux/irq.h>
24 #include <linux/log2.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/slab.h>
28
29 #include <linux/kernel.h> /* printk() */
30 #include <linux/fs.h> /* everything... */
31 #include <linux/errno.h> /* error codes */
32 #include <linux/types.h> /* size_t */
33 #include <linux/proc_fs.h>
34 #include <linux/fcntl.h> /* O_ACCMODE */
35 #include <linux/seq_file.h>
36 #include <linux/cdev.h>
37 //#include <linux/pci.h>
38 #include <asm/unaligned.h>
39 //#include <linux/usb/hcd.h>
40 #include "xhci.h"
41 #include "mtk-test.h"
42 #include "mtk-test-lib.h"
43 #include "xhci-platform.c"
44 #include "mtk-usb-hcd.h"
45 #include "xhci-mtk-power.h"
46 #include "xhci-mtk-scheduler.h"
47
48 #if 1 // USBIF
49 #include <mach/eint.h>
50 #include <linux/irq.h>
51 //#include <linux/switch.h>
52 #endif
53
54 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
55 static int link_quirk;
56
/*
 * xhci_work - bottom half of the xHCI interrupt service: acknowledge the
 * interrupt sources, drain every pending event from the event ring, then
 * clear the Event Handler Busy bit.
 * Called from mtktest_xhci_mtk_irq() with xhci->lock held.
 */
static void xhci_work(struct xhci_hcd *xhci){
	u32 temp;
	u64 temp_64;
	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	printk(KERN_ERR "[OTG_H][xhci_work] read status 0x%x\n", temp);

	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	printk(KERN_ERR "[OTG_H][xhci_work] read irq_pending 0x%x\n", temp);

	/* Set the low two interrupter bits (IP is write-1-to-clear). */
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);

	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	/* Drain the event ring completely before clearing EHB below. */
	while(mtktest_xhci_handle_event(xhci) > 0){}

	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_dbg(xhci, "Clear EHB bit (RW1C)");
	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}
90
/*
 * Top-half xHCI interrupt handler.
 *
 * Under TEST_OTG the SSUSB OTG status events (A/B role change requests and
 * SRP) are serviced first; each of those paths clears its status bit and
 * returns without touching the event ring.  Otherwise the handler checks
 * the op-reg STS_EINT and interrupter IP bits and, when one is set, hands
 * the real xHCI event processing to xhci_work().
 *
 * Returns IRQ_NONE when this controller has nothing pending, IRQ_HANDLED
 * otherwise.
 */
irqreturn_t mtktest_xhci_mtk_irq(struct usb_hcd *hcd){
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;
	union xhci_trb *trb;
#if TEST_OTG
	u32 temp3;
#endif

	spin_lock(&xhci->lock);
#if TEST_OTG
	temp3 = readl(SSUSB_OTG_STS);
	printk(KERN_ERR "[OTG_H][IRQ] OTG_STS 0x%x\n", temp3);
#if 0
	if(temp3 & SSUSB_IDDIG){
		g_otg_iddig = 1;
	}
	else{
		g_otg_iddig = 0;
	}
#endif
#if 0
	if(temp3 & SSUSB_ATTACH_A_ROLE){
#if 0
		//set OTG_VBUSVALID_SEL for host
		temp = readl(SSUSB_U2_CTRL(0));
		temp = temp | SSUSB_U2_PORT_OTG_HOST_VBUSVALID_SEL;
		writel(temp, SSUSB_U2_CTRL(0));
#endif
		//attached as device-A, turn on port power of all xhci port
		mtktest_enableXhciAllPortPower(xhci);
		writel(SSUSB_ATTACH_A_ROLE_CLR, SSUSB_OTG_STS_CLR);
		spin_unlock(&xhci->lock);
		return IRQ_HANDLED;
	}
#endif
	/* Requested to become the A-role (host): flag the HNP state machine
	 * and route the U2 port to the host controller. */
	if(temp3 & SSUSB_CHG_A_ROLE_A){
		g_otg_wait_con = true;
		g_otg_hnp_become_dev = false ;
		g_otg_hnp_become_host = true;
		mb() ;
		writel(SSUSB_CHG_A_ROLE_A_CLR, SSUSB_OTG_STS_CLR);
		//set host sel
		printk(KERN_ERR "[OTG_H] going to set dma to host\n");


#if 0
		writel(0x0f0f0f0f, 0xf00447bc);
		while((readl(0xf00447c4) & 0x2000) == 0x2000){

		}
		printk(KERN_ERR "[OTG_H] can set dma to host\n");
#endif
		temp = readl(SSUSB_U2_CTRL(0));
		temp = temp | SSUSB_U2_PORT_HOST_SEL;
		writel(temp, SSUSB_U2_CTRL(0));
		spin_unlock(&xhci->lock);
		return IRQ_HANDLED;
	}
	/* Requested to become the B-role (device) via HNP. */
	if(temp3 & SSUSB_CHG_B_ROLE_A){
		printk("[OTG_H] get SSUSB_CHG_B_ROLE_A\n");
		g_otg_hnp_become_dev = true;
		mb() ;
		writel(SSUSB_CHG_B_ROLE_A_CLR, SSUSB_OTG_STS_CLR);
		spin_unlock(&xhci->lock);
		return IRQ_HANDLED;
	}
	/* Session Request Protocol interrupt: acknowledged but not acted on. */
	if(temp3 & SSUSB_SRP_REQ_INTR){
		//set port_power
		//g_otg_srp_pend = false;
		//mb() ;
		//while(g_otg_srp_pend);
		printk("[OTG_H] get srp interrupt, just clear it\n");

		writel(SSUSB_SRP_REQ_INTR_CLR, SSUSB_OTG_STS_CLR);
		spin_unlock(&xhci->lock);
		return IRQ_HANDLED;
	}
#endif
	trb = xhci->event_ring->dequeue;

	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);

	//printk(KERN_ERR "[OTG_H]mtktest_xhci_mtk_irq , status: %x\n", temp);

	//printk(KERN_ERR "[OTG_H]mtktest_xhci_mtk_irq , irq_pending: %x\n", temp2);

	/* Neither the op-reg event bit nor the interrupter pending bit is
	 * set: the interrupt is not ours. */
	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	xhci_dbg(xhci, "--> interrupt in: op_reg(%08x), irq_pend(%08x)\n", temp, temp2);

#if 0
	xhci_dbg(xhci, "Got interrupt\n");
	xhci_dbg(xhci, "op reg status = %08x\n", temp);
	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
			(unsigned long long)mtktest_xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
			lower_32_bits(trb->link.segment_ptr),
			upper_32_bits(trb->link.segment_ptr),
			(unsigned int) trb->link.intr_target,
			(unsigned int) trb->link.control);
#endif
	/* Test-harness interrupt counter (-1 means "not counting"). */
	if(g_intr_handled != -1){
		g_intr_handled++;
	}
	xhci_work(xhci);
	spin_unlock(&xhci->lock);
	xhci_dbg(xhci, "<-- interrupt out\n");
	return IRQ_HANDLED;
}
204
205
206 #if 1 // USBIF
207 static int mtk_idpin_irqnum;
208
209 #if 0
210 #define IDPIN_IN MT_EINT_POL_NEG
211 #define IDPIN_OUT MT_EINT_POL_POS
212 #else
213 enum usbif_idpin_state {
214 IDPIN_IN,
215 IDPIN_OUT,
216 };
217
218 #endif
219
220 static struct xhci_hcd *mtk_xhci = NULL;
221 static bool mtk_id_nxt_state = IDPIN_IN;
222 #define IDDIG_EINT_PIN (16)
223
224 static bool mtktest_set_iddig_out_detect(){
225 #if 0
226 mt_eint_set_polarity(IDDIG_EINT_PIN, MT_EINT_POL_POS);
227 mt_eint_unmask(IDDIG_EINT_PIN);
228 #else
229 irq_set_irq_type(mtk_idpin_irqnum, IRQF_TRIGGER_HIGH);
230 //enable_irq(mtk_idpin_irqnum);
231 #endif
232 }
233
234 static bool mtktest_set_iddig_in_detect(){
235 #if 0
236 mt_eint_set_polarity(IDDIG_EINT_PIN, MT_EINT_POL_NEG);
237 mt_eint_unmask(IDDIG_EINT_PIN);
238 #else
239 irq_set_irq_type(mtk_idpin_irqnum, IRQF_TRIGGER_LOW);
240 //enable_irq(mtk_idpin_irqnum);
241 #endif
242 }
243 #if 0
244 static void mtktest_xhci_eint_iddig_isr(void){
245 #else
246 static irqreturn_t mtktest_xhci_eint_iddig_isr(int irqnum, void *data){
247 #endif
248 bool cur_id_state = mtk_id_nxt_state;
249
250 if(cur_id_state == IDPIN_IN){ // HOST
251 /* open port power and switch resource to host */
252 //mtk_switch2host();
253 /* assert port power bit to drive drv_vbus */
254 //mtktest_enableXhciAllPortPower(mtk_xhci);
255
256 /* expect next isr is for id-pin out action */
257 mtk_id_nxt_state = IDPIN_OUT;
258
259 g_otg_iddig = 0 ;
260 mb() ;
261
262 /* make id pin to detect the plug-out */
263 mtktest_set_iddig_out_detect();
264
265 //writel(SSUSB_ATTACH_A_ROLE, SSUSB_OTG_STS);
266 }
267 else{ /* IDPIN_OUT */
268 /* deassert port power bit to drop off vbus */
269 //mtktest_disableXhciAllPortPower(mtk_xhci);
270 /* close all port power, but not switch the resource */
271 //mtk_switch2device(false);
272 /* expect next isr is for id-pin in action */
273 mtk_id_nxt_state = IDPIN_IN;
274
275 g_otg_iddig = 1 ;
276 mb() ;
277 /* make id pin to detect the plug-in */
278 mtktest_set_iddig_in_detect();
279
280 //writel(SSUSB_ATTACH_B_ROLE, SSUSB_OTG_STS);
281 }
282
283 printk("[OTG_H] xhci switch resource to %s\n", (cur_id_state == IDPIN_IN)? "host": "device");
284 }
285
/* Record the xHCI instance for use by the iddig ISR helpers above. */
void mtktest_mtk_xhci_set(struct xhci_hcd *xhci){
	mtk_xhci = xhci;
}
289
290 void mtktest_mtk_xhci_eint_iddig_init(void){
291 #if 0
292 mt_eint_set_sens(IDDIG_EINT_PIN, MT_LEVEL_SENSITIVE);
293 mt_eint_set_hw_debounce(IDDIG_EINT_PIN,64);
294 #else
295 int retval;
296
297 mtk_idpin_irqnum = mt_gpio_to_irq(IDDIG_EINT_PIN);
298 /* microseconds */
299 mt_gpio_set_debounce(IDDIG_EINT_PIN, 50);
300 #endif
301 mtktest_set_iddig_in_detect();
302
303 #if 0
304 mt_eint_registration(IDDIG_EINT_PIN, EINTF_TRIGGER_LOW, mtktest_xhci_eint_iddig_isr, false);
305 #else
306 retval =
307 request_irq(mtk_idpin_irqnum, mtktest_xhci_eint_iddig_isr, IRQF_TRIGGER_LOW, "usbif_iddig_eint",
308 NULL);
309 #endif
310
311 enable_irq(mtk_idpin_irqnum);
312
313 g_otg_iddig = 1 ;
314
315 printk("[OTG_H] XHCI test driver GPIO iddig setting done.\n");
316 }
317
/*
 * Tear down the ID-pin (iddig) GPIO interrupt installed by
 * mtktest_mtk_xhci_eint_iddig_init().
 */
void mtktest_mtk_xhci_eint_iddig_deinit(void){
#if 0
	mt_eint_registration(IDDIG_EINT_PIN, EINTF_TRIGGER_LOW, NULL, false);
#else
	/* Mask the line first so the ISR cannot fire while it is removed. */
	disable_irq_nosync(mtk_idpin_irqnum);

	free_irq(mtk_idpin_irqnum, NULL);

#endif

	printk("[OTG_H] XHCI test driver GPIO iddig deinit done.\n");
}
330
331
332 #endif
333
334 // xhci original functions
335
336 /* TODO: copied from ehci-hcd.c - can this be refactored? */
337 /*
338 * handshake - spin reading hc until handshake completes or fails
339 * @ptr: address of hc register to be read
340 * @mask: bits to look at in result of read
341 * @done: value of those bits when handshake succeeds
342 * @usec: timeout in microseconds
343 *
344 * Returns negative errno, or zero on success
345 *
346 * Success happens when the "mask" bits have the specified value (hardware
347 * handshake done). There are two failure modes: "usec" have passed (major
348 * hardware flakeout), or the register reads as all-ones (hardware removed).
349 */
350 static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
351 u32 mask, u32 done, int usec)
352 {
353 u32 result;
354
355 do {
356 result = xhci_readl(xhci, ptr);
357 if (result == ~(u32)0) /* card removed */
358 return -ENODEV;
359 result &= mask;
360 if (result == done)
361 return 0;
362 udelay(1);
363 usec--;
364 } while (usec > 0);
365 return -ETIMEDOUT;
366 }
367
368 /*
369 * Disable interrupts and begin the xHCI halting process.
370 */
371 void mtktest_xhci_quiesce(struct xhci_hcd *xhci)
372 {
373 u32 halted;
374 u32 cmd;
375 u32 mask;
376
377 mask = ~(XHCI_IRQS);
378 halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
379 if (!halted)
380 mask &= ~CMD_RUN;
381
382 cmd = xhci_readl(xhci, &xhci->op_regs->command);
383 cmd &= mask;
384 xhci_writel(xhci, cmd, &xhci->op_regs->command);
385 }
386
387 /*
388 * Force HC into halt state.
389 *
390 * Disable any IRQs and clear the run/stop bit.
391 * HC will complete any current and actively pipelined transactions, and
392 * should halt within 16 microframes of the run/stop bit being cleared.
393 * Read HC Halted bit in the status register to see when the HC is finished.
394 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
395 */
396 int mtktest_xhci_halt(struct xhci_hcd *xhci)
397 {
398 xhci_dbg(xhci, "// Halt the HC\n");
399 mtktest_xhci_quiesce(xhci);
400
401 return handshake(xhci, &xhci->op_regs->status,
402 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
403 }
404 /*
405 * Set the run bit and wait for the host to be running.
406 */
407 int xhci_start(struct xhci_hcd *xhci)
408 {
409 u32 temp;
410 int ret;
411
412 temp = xhci_readl(xhci, &xhci->op_regs->command);
413 temp |= (CMD_RUN);
414 xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
415 temp);
416 xhci_writel(xhci, temp, &xhci->op_regs->command);
417
418 /*
419 * Wait for the HCHalted Status bit to be 0 to indicate the host is
420 * running.
421 */
422 ret = handshake(xhci, &xhci->op_regs->status,
423 STS_HALT, 0, XHCI_MAX_HALT_USEC);
424 if (ret == -ETIMEDOUT)
425 xhci_err(xhci, "[ERROR]Host took too long to start, "
426 "waited %u microseconds.\n",
427 XHCI_MAX_HALT_USEC);
428 return ret;
429 }
430
431 /*
432 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
433 *
434 * This resets pipelines, timers, counters, state machines, etc.
435 * Transactions will be terminated immediately, and operational registers
436 * will be set to their defaults.
437 */
438 int mtktest_xhci_reset(struct xhci_hcd *xhci)
439 {
440 u32 command;
441 u32 state;
442 int ret;
443
444 state = xhci_readl(xhci, &xhci->op_regs->status);
445 if ((state & STS_HALT) == 0) {
446 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
447 return 0;
448 }
449
450 xhci_dbg(xhci, "// Reset the HC\n");
451 command = xhci_readl(xhci, &xhci->op_regs->command);
452 command |= CMD_RESET;
453 xhci_writel(xhci, command, &xhci->op_regs->command);
454 /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
455 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
456
457 ret = handshake(xhci, &xhci->op_regs->command,
458 CMD_RESET, 0, 250 * 1000);
459 if (ret)
460 return ret;
461
462 xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
463 /*
464 * xHCI cannot write to any doorbells or operational registers other
465 * than status until the "Controller Not Ready" flag is cleared.
466 */
467 return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
468 }
469
470 /*
471 * Initialize memory for HCD and xHC (one-time init).
472 *
473 * Program the PAGESIZE register, initialize the device context array, create
474 * device contexts (?), set up a command ring segment (or two?), create event
475 * ring (one for now).
476 */
477 int mtktest_xhci_init(struct usb_hcd *hcd)
478 {
479 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
480 int retval = 0;
481
482 xhci_dbg(xhci, "mtktest_xhci_init\n");
483 spin_lock_init(&xhci->lock);
484 if (link_quirk) {
485 xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
486 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
487 } else {
488 xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
489 }
490 retval = mtktest_xhci_mem_init(xhci, GFP_KERNEL);
491 xhci_dbg(xhci, "Finished mtktest_xhci_init\n");
492
493 return retval;
494 }
495
496 /*-------------------------------------------------------------------------*/
497
/**
 * mtktest_xhci_get_endpoint_index - Used for passing endpoint bitmasks
 * between the core and HCDs.  Find the index for an endpoint given its
 * descriptor.  Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int mtktest_xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int base = (unsigned int) (usb_endpoint_num(desc) * 2);

	if (usb_endpoint_xfer_control(desc))
		return base;	/* (num*2) + 1 - 1 collapses to num*2 */
	return base + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
}
518
/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int mtktest_xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	/* 2U << n is exactly 1 << (n + 1): skip past the slot-context bit. */
	return 2U << ep_index;
}
527
528
/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int mtktest_xhci_last_valid_endpoint(u32 added_ctxs)
{
	/* fls() is 1-based (fls(0x8) == 4); the context index is 0-based. */
	return fls(added_ctxs) - 1;
}
539
540 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
541 struct xhci_container_ctx *in_ctx,
542 struct xhci_container_ctx *out_ctx,
543 u32 add_flags, u32 drop_flags)
544 {
545 struct xhci_input_control_ctx *ctrl_ctx;
546 ctrl_ctx = mtktest_xhci_get_input_control_ctx(xhci, in_ctx);
547 ctrl_ctx->add_flags = add_flags;
548 ctrl_ctx->drop_flags = drop_flags;
549 mtktest_xhci_slot_copy(xhci, in_ctx, out_ctx);
550 ctrl_ctx->add_flags |= SLOT_FLAG;
551
552 xhci_dbg(xhci, "Input Context:\n");
553 mtktest_xhci_dbg_ctx(xhci, in_ctx, mtktest_xhci_last_valid_endpoint(add_flags));
554 }
555
/*
 * Build an input context that moves one endpoint's dequeue pointer, for
 * hardware with the reset-endpoint quirk: copy the endpoint context from
 * the output context, patch in the new dequeue address/cycle state, and
 * set up the control context to add (and drop) just that endpoint.
 */
void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	/* Start from the device's current endpoint state. */
	mtktest_xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = mtktest_xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = mtktest_xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		/* Dequeue pointer does not map to a DMA address: give up. */
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	/* New dequeue address with the consumer cycle state in bit 0. */
	ep_ctx->deq = addr | deq_state->new_cycle_state;

	added_ctxs = mtktest_xhci_get_endpoint_flag_from_index(ep_index);
	/* Same flag for add and drop: drop-then-re-add this endpoint only. */
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
585
586 /* hc interface non-used functions */
587 int mtktest_xhci_mtk_run(struct usb_hcd *hcd){
588 printk("mtktest_xhci_mtk_run is called\n");
589 u32 temp;
590 u64 temp_64;
591 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
592 void (*doorbell)(struct xhci_hcd *) = NULL;
593
594 hcd->uses_new_polling = 1;
595 // hcd->poll_rh = 0;
596
597 xhci_dbg(xhci, "mtktest_xhci_run\n");
598 #if 0 /* FIXME: MSI not setup yet */
599 /* Do this at the very last minute */
600 ret = xhci_setup_msix(xhci);
601 if (!ret)
602 return ret;
603
604 return -ENOSYS;
605 #endif
606 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
607 init_timer(&xhci->event_ring_timer);
608 xhci->event_ring_timer.data = (unsigned long) xhci;
609 xhci->event_ring_timer.function = xhci_event_ring_work;
610 /* Poll the event ring */
611 xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
612 xhci->zombie = 0;
613 xhci_dbg(xhci, "Setting event ring polling timer\n");
614 add_timer(&xhci->event_ring_timer);
615 #endif
616
617 xhci_dbg(xhci, "Command ring memory map follows:\n");
618 mtktest_xhci_debug_ring(xhci, xhci->cmd_ring);
619 mtktest_xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
620 mtktest_xhci_dbg_cmd_ptrs(xhci);
621
622 xhci_dbg(xhci, "ERST memory map follows:\n");
623 mtktest_xhci_dbg_erst(xhci, &xhci->erst);
624 xhci_dbg(xhci, "Event ring:\n");
625 mtktest_xhci_debug_ring(xhci, xhci->event_ring);
626 mtktest_xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
627 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
628 temp_64 &= ~ERST_PTR_MASK;
629 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
630
631 xhci_dbg(xhci, "// Set the interrupt modulation register\n");
632 temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
633 temp &= ~ER_IRQ_INTERVAL_MASK;
634 temp |= (u32) 160;
635 xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
636
637 /* Set the HCD state before we enable the irqs */
638 hcd->state = HC_STATE_RUNNING;
639 temp = xhci_readl(xhci, &xhci->op_regs->command);
640 temp |= (CMD_EIE);
641 xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
642 temp);
643 xhci_writel(xhci, temp, &xhci->op_regs->command);
644
645 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
646 xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
647 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
648 xhci_writel(xhci, ER_IRQ_ENABLE(temp),
649 &xhci->ir_set->irq_pending);
650 mtktest_xhci_print_ir_set(xhci, xhci->ir_set, 0);
651
652 if (NUM_TEST_NOOPS > 0)
653 doorbell = mtktest_xhci_setup_one_noop(xhci);
654 #if 0
655 if (xhci->quirks & XHCI_NEC_HOST)
656 mtktest_xhci_queue_address_device(xhci, 0, 0, 0,
657 TRB_TYPE(TRB_NEC_GET_FW));
658 #endif
659 if (xhci_start(xhci)) {
660 mtktest_xhci_halt(xhci);
661 return -ENODEV;
662 }
663
664 xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
665 if (doorbell)
666 (*doorbell)(xhci);
667 #if 0
668 if (xhci->quirks & XHCI_NEC_HOST)
669 mtktest_xhci_ring_cmd_db(xhci);
670 #endif
671
672 #if 0 // USBIF
673 mtktest_mtk_xhci_set(xhci);
674 mtktest_mtk_xhci_eint_iddig_init();
675 #else
676
677 #if TEST_OTG
678 mb() ;
679 if(!g_otg_test){
680 #endif
681 mtktest_enableXhciAllPortPower(xhci);
682 #if TEST_OTG
683 }
684 #endif
685 #endif
686 msleep(50);
687
688 mtktest_disableAllClockPower();
689 xhci_dbg(xhci, "Finished mtktest_xhci_run\n");
690 return 0;
691 }
692
693 void mtktest_xhci_mtk_stop(struct usb_hcd *hcd){
694 printk("mtktest_xhci_mtk_stop is called\n");
695 u32 temp;
696 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
697
698 #ifdef TEST_OTG_IDDIG // USBIF
699 mtktest_disableXhciAllPortPower(xhci);
700 mtktest_mtk_xhci_eint_iddig_deinit();
701
702 #endif
703
704 spin_lock_irq(&xhci->lock);
705 mtktest_xhci_halt(xhci);
706 mtktest_xhci_reset(xhci);
707 spin_unlock_irq(&xhci->lock);
708
709 #if 0 /* No MSI yet */
710 xhci_cleanup_msix(xhci);
711 #endif
712 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
713 /* Tell the event ring poll function not to reschedule */
714 xhci->zombie = 1;
715 del_timer_sync(&xhci->event_ring_timer);
716 #endif
717
718 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
719 temp = xhci_readl(xhci, &xhci->op_regs->status);
720 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
721 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
722 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
723 &xhci->ir_set->irq_pending);
724 mtktest_xhci_print_ir_set(xhci, xhci->ir_set, 0);
725
726 xhci_dbg(xhci, "cleaning up memory\n");
727 mtktest_xhci_mem_cleanup(xhci);
728 xhci_dbg(xhci, "mtktest_xhci_stop completed - status = %x\n",
729 xhci_readl(xhci, &xhci->op_regs->status));
730 //mtktest_resetIP();
731 }
732
733 void mtktest_xhci_mtk_shutdown(struct usb_hcd *hcd){
734 printk("mtktest_xhci_mtk_shutdown is called\n");
735 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
736
737 spin_lock_irq(&xhci->lock);
738 mtktest_xhci_halt(xhci);
739 spin_unlock_irq(&xhci->lock);
740
741 #if 0
742 xhci_cleanup_msix(xhci);
743 #endif
744
745 xhci_dbg(xhci, "mtktest_xhci_shutdown completed - status = %x\n",
746 xhci_readl(xhci, &xhci->op_regs->status));
747 }
748
749 int mtktest_xhci_mtk_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags){
750 printk("mtktest_xhci_mtk_urb_enqueue is called\n");
751 }
752
/*
 * Stub: URB cancellation is not used by the test driver.
 * Fix: added the missing return value (falling off the end of a non-void
 * function is undefined behavior when the result is used).
 */
int mtktest_xhci_mtk_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status){
	printk("mtktest_xhci_mtk_urb_dequeue is called\n");
	return 0;
}
756
/*
 * Stub: slot allocation is not driven through the HCD core here.
 * Fix: added the missing return value (previously undefined behavior).
 * NOTE(review): the usb core treats non-zero as success for alloc_dev in
 * mainline xhci; confirm 0 is the intended answer if this path is ever hit.
 */
int mtktest_xhci_mtk_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev){
	printk("mtktest_xhci_mtk_alloc_dev is called\n");
	return 0;
}
760
/* Stub: slot release is not driven through the HCD core here. */
void mtktest_xhci_mtk_free_dev(struct usb_hcd *hcd, struct usb_device *udev){
	printk("mtktest_xhci_mtk_free_dev is called\n");
}
764
765 int mtktest_xhci_mtk_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev
766 , struct usb_host_endpoint **eps, unsigned int num_eps,
767 unsigned int num_streams, gfp_t mem_flags){
768 printk("mtktest_xhci_mtk_alloc_streams is called\n");
769 }
770
771 int mtktest_xhci_mtk_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
772 struct usb_host_endpoint **eps, unsigned int num_eps,
773 gfp_t mem_flags){
774 printk("mtktest_xhci_mtk_free_streams is called\n");
775 }
776
/*
 * Stage an endpoint addition in the device's input context.  The xHC is
 * not told yet -- the add flag is only recorded here and pushed to the
 * hardware later by a Configure Endpoint command (check_bandwidth path
 * in mainline xHCI).
 *
 * Returns 0 on success or when the request is a no-op (slot/ep0, or the
 * endpoint is already enabled); -EINVAL for an unaddressed device;
 * -ENOMEM if ring/context setup fails.
 */
int mtktest_xhci_mtk_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep){
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;
#if 0
	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
#endif
	xhci = hcd_to_xhci(hcd);

	added_ctxs = mtktest_xhci_get_endpoint_flag(&ep->desc);
	last_ctx = mtktest_xhci_last_valid_endpoint(added_ctxs);
	/* The slot context and endpoint 0 can never be "added". */
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = mtktest_xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = mtktest_xhci_get_endpoint_index(&ep->desc);
	ep_ctx = mtktest_xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (ctrl_ctx->add_flags & mtktest_xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documenation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (mtktest_xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = mtktest_xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	xhci_dbg(xhci, "new slot context 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n"
			, slot_ctx->dev_info, slot_ctx->dev_info2, slot_ctx->tt_info, slot_ctx->dev_state
			, slot_ctx->reserved[0], slot_ctx->reserved[1], slot_ctx->reserved[2], slot_ctx->reserved[3]);
	return 0;
}
874
/*
 * Stage an endpoint removal in the device's input context (the inverse
 * of mtktest_xhci_mtk_add_endpoint): set the drop flag, clear any
 * pending add flag, shrink the slot's last-valid-context field if this
 * was the highest endpoint, and zero the cached endpoint state.
 *
 * Returns 0 on success or when the request is a no-op (slot/ep0, or the
 * endpoint is already disabled); -ENODEV if the controller is dying.
 */
int mtktest_xhci_mtk_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev
	, struct usb_host_endpoint *ep){
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = mtktest_xhci_get_endpoint_flag(&ep->desc);
	/* The slot context and endpoint 0 can never be dropped. */
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = mtktest_xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = mtktest_xhci_get_endpoint_index(&ep->desc);
	ep_ctx = mtktest_xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
			EP_STATE_DISABLED ||
			le32_to_cpu(ctrl_ctx->drop_flags) &
			mtktest_xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* A pending "add" of the same endpoint is cancelled by the drop. */
	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = mtktest_xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = mtktest_xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	mtktest_xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/*
 * Recover a stalled endpoint ring: compute a new hardware dequeue
 * position past the offending TD and either queue a Set TR Dequeue
 * command or, for hardware with the reset-endpoint quirk, stash the
 * state in an input context for a later Configure Endpoint command.
 */
void mtktest_xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	mtktest_xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		mtktest_xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
977
978 void mtktest_xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
979 {
980 struct xhci_input_control_ctx *ctrl_ctx;
981 struct xhci_ep_ctx *ep_ctx;
982 struct xhci_slot_ctx *slot_ctx;
983 int i;
984
985 /* When a device's add flag and drop flag are zero, any subsequent
986 * configure endpoint command will leave that endpoint's state
987 * untouched. Make sure we don't leave any old state in the input
988 * endpoint contexts.
989 */
990 ctrl_ctx = mtktest_xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
991 ctrl_ctx->drop_flags = 0;
992 ctrl_ctx->add_flags = 0;
993 slot_ctx = mtktest_xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
994 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
995 /* Endpoint 0 is always valid */
996 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
997 for (i = 1; i < 31; ++i) {
998 ep_ctx = mtktest_xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
999 ep_ctx->ep_info = 0;
1000 ep_ctx->ep_info2 = 0;
1001 ep_ctx->deq = 0;
1002 ep_ctx->tx_info = 0;
1003 }
1004 }
1005
1006
/* Stub hook: endpoint reset is not implemented in this test driver;
 * only logs the invocation.
 */
void mtktest_xhci_mtk_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	printk("mtktest_xhci_mtk_endpoint_reset is called\n");
}
1010
/* Stub hook: bandwidth checking is not implemented in this test driver.
 * Fix: the function is declared int but had no return statement, which is
 * undefined behavior when the caller uses the result; return 0 (success).
 */
int mtktest_xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	printk("mtktest_xhci_mtk_check_bandwidth is called\n");
	return 0;
}
1014
/* Stub hook: bandwidth reset is not implemented in this test driver;
 * only logs the invocation.
 */
void mtktest_xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	printk("mtktest_xhci_mtk_reset_bandwidth is called\n");
}
1018
/* Stub hook: address-device is not implemented in this test driver.
 * Fix: the function is declared int but had no return statement (UB if
 * the caller uses the result); return 0 (success).
 */
int mtktest_xhci_mtk_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	printk("mtktest_xhci_mtk_address_device is called\n");
	return 0;
}
1022
1023 int mtktest_xhci_mtk_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
1024 struct usb_tt *tt, gfp_t mem_flags){
1025 printk("mtktest_xhci_mtk_update_hub_device is called\n");
1026 }
1027
/* Stub hook: device reset is not implemented in this test driver.
 * Fix: the function is declared int but had no return statement (UB if
 * the caller uses the result); return 0 (success).
 */
int mtktest_xhci_mtk_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	printk("mtktest_xhci_mtk_reset_device is called\n");
	return 0;
}
1031
1032 int mtktest_xhci_mtk_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1033 u16 wIndex, char *buf, u16 wLength){
1034 printk("mtktest_xhci_mtk_hub_control is called\n");
1035 return 0;
1036 }
1037
/* Stub hook: hub status polling is not handled by this test driver;
 * logs the invocation and reports no status change (0).
 */
int mtktest_xhci_mtk_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	printk("mtktest_xhci_mtk_hub_status_data is called\n");
	return 0;
}
1042
/* Stub hook: frame-number query is not implemented in this test driver.
 * Fix: the function is declared int but had no return statement (UB if
 * the caller uses the result); return 0.
 */
int mtktest_xhci_mtk_get_frame(struct usb_hcd *hcd)
{
	printk("mtktest_xhci_mtk_get_frame is called\n");
	return 0;
}
#if defined(CONFIG_MTK_LM_MODE)
/* Large-memory mode: use 64-bit DMA addressing */
#define MTK_XHCI_DMA_BIT_MASK DMA_BIT_MASK(64)
#else
#define MTK_XHCI_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

/* NOTE(review): only referenced by a commented-out .dma_mask initializer
 * in this file — candidate for removal; verify no external users.
 */
static u64 dummy_mask = MTK_XHCI_DMA_BIT_MASK;
1053
/* Statically allocated platform device for the xHCI test HCD.
 * NOTE(review): mtk_xhci_hcd_init() memsets this structure and then
 * re-populates the same fields before registering it, so these
 * initializers are effectively documentation of the intended values.
 */
static struct platform_device xhci_platform_dev = {
	.name = hcd_name,
	.id = -1,
	.dev = {
//		.dma_mask = &dummy_mask,
		.coherent_dma_mask = MTK_XHCI_DMA_BIT_MASK,
		.release = xhci_hcd_release,
	},
};
1063
#if 0
/* Disabled debug helper: pokes the U3 MAC TX FIFO wait-empty register
 * through a hard-coded physical address.  Dead code kept for reference.
 */
#define U3_MAC_TX_FIFO_WAIT_EMPTY_ADDR 0xf0041144

void setMacFIFOWaitEmptyValue(){
	__u32 __iomem *mac_tx_fifo_wait_empty_addr;
	u32 mac_tx_fifo_wait_empty_value;
	mac_tx_fifo_wait_empty_addr = U3_MAC_TX_FIFO_WAIT_EMPTY_ADDR;
	mac_tx_fifo_wait_empty_value = 0x5;
	writel(mac_tx_fifo_wait_empty_value, mac_tx_fifo_wait_empty_addr);
}
#endif
1075
1076
1077 //initial MAC3 register, should be called after HC reset and before set PP=1 of each port
1078 void mtktest_setInitialReg(){
1079 __u32 __iomem *addr;
1080 u32 temp;
1081 int num_u3_port;
1082
1083 num_u3_port = SSUSB_U3_PORT_NUM(readl((void __iomem *)SSUSB_IP_CAP));
1084
1085 printk("[OTG_H] mtktest_setInitialReg , num_u3_port = %d\n", num_u3_port);
1086 // USBIF , we should enable it in real chip
1087 if(num_u3_port ){
1088 //set MAC reference clock speed
1089 addr = SSUSB_U3_MAC_BASE+U3_UX_EXIT_LFPS_TIMING_PAR;
1090 temp = readl(addr);
1091 temp &= ~(0xff << U3_RX_UX_EXIT_LFPS_REF_OFFSET);
1092 temp |= (U3_RX_UX_EXIT_LFPS_REF << U3_RX_UX_EXIT_LFPS_REF_OFFSET);
1093 writel(temp, addr);
1094 addr = SSUSB_U3_MAC_BASE+U3_REF_CK_PAR;
1095 temp = readl(addr);
1096 temp &= ~(0xff);
1097 temp |= U3_REF_CK_VAL;
1098 writel(temp, addr);
1099
1100 //set SYS_CK
1101 addr = SSUSB_U3_SYS_BASE+U3_TIMING_PULSE_CTRL;
1102 temp = readl(addr);
1103 temp &= ~(0xff);
1104 temp |= CNT_1US_VALUE;
1105 writel(temp, addr);
1106 }
1107
1108 addr = SSUSB_U2_SYS_BASE+USB20_TIMING_PARAMETER;
1109 temp &= ~(0xff);
1110 temp |= TIME_VALUE_1US;
1111 writel(temp, addr);
1112
1113 // USBIF , USBIF , we should enable it in real chip
1114 if(num_u3_port ){
1115 //set LINK_PM_TIMER=3
1116 addr = SSUSB_U3_SYS_BASE+LINK_PM_TIMER;
1117 temp = readl(addr);
1118 temp &= ~(0xf);
1119 temp |= PM_LC_TIMEOUT_VALUE;
1120 writel(temp, addr);
1121 }
1122
1123 u3phy_init();
1124 }
1125
1126 void mtktest_setLatchSel(){
1127 __u32 __iomem *latch_sel_addr;
1128 u32 latch_sel_value;
1129
1130 if(g_num_u3_port <= 0)
1131 return;
1132
1133 latch_sel_addr = U3_PIPE_LATCH_SEL_ADD;
1134 latch_sel_value = ((U3_PIPE_LATCH_TX)<<2) | (U3_PIPE_LATCH_RX);
1135 writel(latch_sel_value, latch_sel_addr);
1136 }
1137
/* Re-initialize the xHCI IP: enable clock/power gating (which also
 * re-inits the IP via IPPC), set the MAC3 PIPE latch and initialize the
 * MTK scheduler.
 * Fix: removed unused locals ip_reset_addr/ip_reset_value left over from
 * the commented-out software-reset sequence below.
 */
void mtktest_reinitIP(void)
{
	/* reset host IP — original explicit sequence kept for reference:
	 * writel(SSUSB_XHCI_SW_RST, SSUSB_XHCI_RST_CTRL);
	 * msleep(10);
	 * writel(0, SSUSB_XHCI_RST_CTRL);
	 * msleep(500);
	 */

	/* enable clock/gating, include re-init IP in IPPC */
	mtktest_enableAllClockPower();
	/* set MAC3 PIPE latch */
	mtktest_setLatchSel();
	mtktest_mtk_xhci_scheduler_init();
}
1155
1156 int mtk_xhci_hcd_init(void)
1157 {
1158 int retval = 0;
1159 __u32 __iomem *ip_reset_addr;
1160 u32 ip_reset_value;
1161 struct platform_device *pPlatformDev;
1162
1163 printk(KERN_ERR "Module Init start!\n");
1164 //mtktest_resetIP
1165 mtktest_reinitIP();
1166
1167 retval = platform_driver_register(&xhci_versatile_driver);
1168 if (retval < 0)
1169 {
1170 printk(KERN_ERR "Problem registering platform driver.");
1171 return retval;
1172 }
1173
1174 pPlatformDev = &xhci_platform_dev;
1175 memset(pPlatformDev, 0, sizeof(struct platform_device));
1176 pPlatformDev->name = hcd_name;
1177 pPlatformDev->id = -1;
1178 pPlatformDev->dev.coherent_dma_mask = MTK_XHCI_DMA_BIT_MASK;
1179 pPlatformDev->dev.release = xhci_hcd_release;
1180 retval = platform_device_register(&xhci_platform_dev);
1181 if (retval < 0)
1182 {
1183 platform_driver_unregister (&xhci_versatile_driver);
1184 }
1185 printk(KERN_ERR "Module Init success!\n");
1186 //mtktest_setInitialReg();
1187
1188 mtktest_mtk_xhci_eint_iddig_init() ;
1189 /*
1190 * Check the compiler generated sizes of structures that must be laid
1191 * out in specific ways for hardware access.
1192 */
1193 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
1194 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
1195 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
1196 /* xhci_device_control has eight fields, and also
1197 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
1198 */
1199 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
1200 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
1201 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
1202 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
1203 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
1204 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
1205 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
1206 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
1207 return 0;
1208 }
1209
1210 void mtk_xhci_hcd_cleanup(void)
1211 {
1212 //xhci_unregister_pci();
1213 uint32_t nCount;
1214 uint32_t i;
1215 struct platform_device *pPlatformDev;
1216
1217 mtktest_mtk_xhci_eint_iddig_deinit() ;
1218
1219 platform_device_unregister(&xhci_platform_dev);
1220 platform_driver_unregister(&xhci_versatile_driver);
1221 }
1222