/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
23 #include <linux/irq.h>
24 #include <linux/log2.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/slab.h>
29 #include <linux/kernel.h> /* printk() */
30 #include <linux/fs.h> /* everything... */
31 #include <linux/errno.h> /* error codes */
32 #include <linux/types.h> /* size_t */
33 #include <linux/proc_fs.h>
34 #include <linux/fcntl.h> /* O_ACCMODE */
35 #include <linux/seq_file.h>
36 #include <linux/cdev.h>
37 //#include <linux/pci.h>
38 #include <asm/unaligned.h>
39 //#include <linux/usb/hcd.h>
42 #include "mtk-test-lib.h"
43 #include "xhci-platform.c"
44 #include "mtk-usb-hcd.h"
45 #include "xhci-mtk-power.h"
46 #include "xhci-mtk-scheduler.h"
49 #include <mach/eint.h>
50 #include <linux/irq.h>
51 //#include <linux/switch.h>
54 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
55 static int link_quirk
;
57 static void xhci_work(struct xhci_hcd
*xhci
){
61 * Clear the op reg interrupt status first,
62 * so we can receive interrupts from other MSI-X interrupters.
63 * Write 1 to clear the interrupt status.
65 temp
= xhci_readl(xhci
, &xhci
->op_regs
->status
);
66 printk(KERN_ERR
"[OTG_H][xhci_work] read status 0x%x\n", temp
);
69 xhci_writel(xhci
, temp
, &xhci
->op_regs
->status
);
71 /* Acknowledge the interrupt */
72 temp
= xhci_readl(xhci
, &xhci
->ir_set
->irq_pending
);
73 printk(KERN_ERR
"[OTG_H][xhci_work] read irq_pending 0x%x\n", temp
);
76 xhci_writel(xhci
, temp
, &xhci
->ir_set
->irq_pending
);
78 /* Flush posted writes */
79 xhci_readl(xhci
, &xhci
->ir_set
->irq_pending
);
81 while(mtktest_xhci_handle_event(xhci
) > 0){}
83 /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
84 temp_64
= xhci_read_64(xhci
, &xhci
->ir_set
->erst_dequeue
);
85 xhci_dbg(xhci
, "Clear EHB bit (RW1C)");
86 xhci_write_64(xhci
, temp_64
| ERST_EHB
, &xhci
->ir_set
->erst_dequeue
);
87 /* Flush posted writes -- FIXME is this necessary? */
88 xhci_readl(xhci
, &xhci
->ir_set
->irq_pending
);
91 irqreturn_t
mtktest_xhci_mtk_irq(struct usb_hcd
*hcd
){
92 struct xhci_hcd
*xhci
= hcd_to_xhci(hcd
);
99 spin_lock(&xhci
->lock
);
101 temp3
= readl(SSUSB_OTG_STS
);
102 printk(KERN_ERR
"[OTG_H][IRQ] OTG_STS 0x%x\n", temp3
);
104 if(temp3
& SSUSB_IDDIG
){
112 if(temp3
& SSUSB_ATTACH_A_ROLE
){
114 //set OTG_VBUSVALID_SEL for host
115 temp
= readl(SSUSB_U2_CTRL(0));
116 temp
= temp
| SSUSB_U2_PORT_OTG_HOST_VBUSVALID_SEL
;
117 writel(temp
, SSUSB_U2_CTRL(0));
119 //attached as device-A, turn on port power of all xhci port
120 mtktest_enableXhciAllPortPower(xhci
);
121 writel(SSUSB_ATTACH_A_ROLE_CLR
, SSUSB_OTG_STS_CLR
);
122 spin_unlock(&xhci
->lock
);
126 if(temp3
& SSUSB_CHG_A_ROLE_A
){
127 g_otg_wait_con
= true;
128 g_otg_hnp_become_dev
= false ;
129 g_otg_hnp_become_host
= true;
131 writel(SSUSB_CHG_A_ROLE_A_CLR
, SSUSB_OTG_STS_CLR
);
133 printk(KERN_ERR
"[OTG_H] going to set dma to host\n");
137 writel(0x0f0f0f0f, 0xf00447bc);
138 while((readl(0xf00447c4) & 0x2000) == 0x2000){
141 printk(KERN_ERR
"[OTG_H] can set dma to host\n");
143 temp
= readl(SSUSB_U2_CTRL(0));
144 temp
= temp
| SSUSB_U2_PORT_HOST_SEL
;
145 writel(temp
, SSUSB_U2_CTRL(0));
146 spin_unlock(&xhci
->lock
);
149 if(temp3
& SSUSB_CHG_B_ROLE_A
){
150 printk("[OTG_H] get SSUSB_CHG_B_ROLE_A\n");
151 g_otg_hnp_become_dev
= true;
153 writel(SSUSB_CHG_B_ROLE_A_CLR
, SSUSB_OTG_STS_CLR
);
154 spin_unlock(&xhci
->lock
);
157 if(temp3
& SSUSB_SRP_REQ_INTR
){
159 //g_otg_srp_pend = false;
161 //while(g_otg_srp_pend);
162 printk("[OTG_H] get srp interrupt, just clear it\n");
164 writel(SSUSB_SRP_REQ_INTR_CLR
, SSUSB_OTG_STS_CLR
);
165 spin_unlock(&xhci
->lock
);
169 trb
= xhci
->event_ring
->dequeue
;
171 temp
= xhci_readl(xhci
, &xhci
->op_regs
->status
);
172 temp2
= xhci_readl(xhci
, &xhci
->ir_set
->irq_pending
);
174 //printk(KERN_ERR "[OTG_H]mtktest_xhci_mtk_irq , status: %x\n", temp);
176 //printk(KERN_ERR "[OTG_H]mtktest_xhci_mtk_irq , irq_pending: %x\n", temp2);
178 if (!(temp
& STS_EINT
) && !ER_IRQ_PENDING(temp2
)) {
179 spin_unlock(&xhci
->lock
);
182 xhci_dbg(xhci
, "--> interrupt in: op_reg(%08x), irq_pend(%08x)\n", temp
, temp2
);
185 xhci_dbg(xhci
, "Got interrupt\n");
186 xhci_dbg(xhci
, "op reg status = %08x\n", temp
);
187 xhci_dbg(xhci
, "ir set irq_pending = %08x\n", temp2
);
188 xhci_dbg(xhci
, "Event ring dequeue ptr:\n");
189 xhci_dbg(xhci
, "@%llx %08x %08x %08x %08x\n",
190 (unsigned long long)mtktest_xhci_trb_virt_to_dma(xhci
->event_ring
->deq_seg
, trb
),
191 lower_32_bits(trb
->link
.segment_ptr
),
192 upper_32_bits(trb
->link
.segment_ptr
),
193 (unsigned int) trb
->link
.intr_target
,
194 (unsigned int) trb
->link
.control
);
196 if(g_intr_handled
!= -1){
200 spin_unlock(&xhci
->lock
);
201 xhci_dbg(xhci
, "<-- interrupt out\n");
207 static int mtk_idpin_irqnum
;
210 #define IDPIN_IN MT_EINT_POL_NEG
211 #define IDPIN_OUT MT_EINT_POL_POS
213 enum usbif_idpin_state
{
220 static struct xhci_hcd
*mtk_xhci
= NULL
;
221 static bool mtk_id_nxt_state
= IDPIN_IN
;
222 #define IDDIG_EINT_PIN (16)
224 static bool mtktest_set_iddig_out_detect(){
226 mt_eint_set_polarity(IDDIG_EINT_PIN
, MT_EINT_POL_POS
);
227 mt_eint_unmask(IDDIG_EINT_PIN
);
229 irq_set_irq_type(mtk_idpin_irqnum
, IRQF_TRIGGER_HIGH
);
230 //enable_irq(mtk_idpin_irqnum);
234 static bool mtktest_set_iddig_in_detect(){
236 mt_eint_set_polarity(IDDIG_EINT_PIN
, MT_EINT_POL_NEG
);
237 mt_eint_unmask(IDDIG_EINT_PIN
);
239 irq_set_irq_type(mtk_idpin_irqnum
, IRQF_TRIGGER_LOW
);
240 //enable_irq(mtk_idpin_irqnum);
244 static void mtktest_xhci_eint_iddig_isr(void){
246 static irqreturn_t
mtktest_xhci_eint_iddig_isr(int irqnum
, void *data
){
248 bool cur_id_state
= mtk_id_nxt_state
;
250 if(cur_id_state
== IDPIN_IN
){ // HOST
251 /* open port power and switch resource to host */
253 /* assert port power bit to drive drv_vbus */
254 //mtktest_enableXhciAllPortPower(mtk_xhci);
256 /* expect next isr is for id-pin out action */
257 mtk_id_nxt_state
= IDPIN_OUT
;
262 /* make id pin to detect the plug-out */
263 mtktest_set_iddig_out_detect();
265 //writel(SSUSB_ATTACH_A_ROLE, SSUSB_OTG_STS);
267 else{ /* IDPIN_OUT */
268 /* deassert port power bit to drop off vbus */
269 //mtktest_disableXhciAllPortPower(mtk_xhci);
270 /* close all port power, but not switch the resource */
271 //mtk_switch2device(false);
272 /* expect next isr is for id-pin in action */
273 mtk_id_nxt_state
= IDPIN_IN
;
277 /* make id pin to detect the plug-in */
278 mtktest_set_iddig_in_detect();
280 //writel(SSUSB_ATTACH_B_ROLE, SSUSB_OTG_STS);
283 printk("[OTG_H] xhci switch resource to %s\n", (cur_id_state
== IDPIN_IN
)? "host": "device");
286 void mtktest_mtk_xhci_set(struct xhci_hcd
*xhci
){
290 void mtktest_mtk_xhci_eint_iddig_init(void){
292 mt_eint_set_sens(IDDIG_EINT_PIN
, MT_LEVEL_SENSITIVE
);
293 mt_eint_set_hw_debounce(IDDIG_EINT_PIN
,64);
297 mtk_idpin_irqnum
= mt_gpio_to_irq(IDDIG_EINT_PIN
);
299 mt_gpio_set_debounce(IDDIG_EINT_PIN
, 50);
301 mtktest_set_iddig_in_detect();
304 mt_eint_registration(IDDIG_EINT_PIN
, EINTF_TRIGGER_LOW
, mtktest_xhci_eint_iddig_isr
, false);
307 request_irq(mtk_idpin_irqnum
, mtktest_xhci_eint_iddig_isr
, IRQF_TRIGGER_LOW
, "usbif_iddig_eint",
311 enable_irq(mtk_idpin_irqnum
);
315 printk("[OTG_H] XHCI test driver GPIO iddig setting done.\n");
318 void mtktest_mtk_xhci_eint_iddig_deinit(void){
320 mt_eint_registration(IDDIG_EINT_PIN
, EINTF_TRIGGER_LOW
, NULL
, false);
322 disable_irq_nosync(mtk_idpin_irqnum
);
324 free_irq(mtk_idpin_irqnum
, NULL
);
328 printk("[OTG_H] XHCI test driver GPIO iddig deinit done.\n");
334 // xhci original functions
336 /* TODO: copied from ehci-hcd.c - can this be refactored? */
338 * handshake - spin reading hc until handshake completes or fails
339 * @ptr: address of hc register to be read
340 * @mask: bits to look at in result of read
341 * @done: value of those bits when handshake succeeds
342 * @usec: timeout in microseconds
344 * Returns negative errno, or zero on success
346 * Success happens when the "mask" bits have the specified value (hardware
347 * handshake done). There are two failure modes: "usec" have passed (major
348 * hardware flakeout), or the register reads as all-ones (hardware removed).
350 static int handshake(struct xhci_hcd
*xhci
, void __iomem
*ptr
,
351 u32 mask
, u32 done
, int usec
)
356 result
= xhci_readl(xhci
, ptr
);
357 if (result
== ~(u32
)0) /* card removed */
369 * Disable interrupts and begin the xHCI halting process.
371 void mtktest_xhci_quiesce(struct xhci_hcd
*xhci
)
378 halted
= xhci_readl(xhci
, &xhci
->op_regs
->status
) & STS_HALT
;
382 cmd
= xhci_readl(xhci
, &xhci
->op_regs
->command
);
384 xhci_writel(xhci
, cmd
, &xhci
->op_regs
->command
);
388 * Force HC into halt state.
390 * Disable any IRQs and clear the run/stop bit.
391 * HC will complete any current and actively pipelined transactions, and
392 * should halt within 16 microframes of the run/stop bit being cleared.
393 * Read HC Halted bit in the status register to see when the HC is finished.
394 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
396 int mtktest_xhci_halt(struct xhci_hcd
*xhci
)
398 xhci_dbg(xhci
, "// Halt the HC\n");
399 mtktest_xhci_quiesce(xhci
);
401 return handshake(xhci
, &xhci
->op_regs
->status
,
402 STS_HALT
, STS_HALT
, XHCI_MAX_HALT_USEC
);
405 * Set the run bit and wait for the host to be running.
407 int xhci_start(struct xhci_hcd
*xhci
)
412 temp
= xhci_readl(xhci
, &xhci
->op_regs
->command
);
414 xhci_dbg(xhci
, "// Turn on HC, cmd = 0x%x.\n",
416 xhci_writel(xhci
, temp
, &xhci
->op_regs
->command
);
419 * Wait for the HCHalted Status bit to be 0 to indicate the host is
422 ret
= handshake(xhci
, &xhci
->op_regs
->status
,
423 STS_HALT
, 0, XHCI_MAX_HALT_USEC
);
424 if (ret
== -ETIMEDOUT
)
425 xhci_err(xhci
, "[ERROR]Host took too long to start, "
426 "waited %u microseconds.\n",
432 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
434 * This resets pipelines, timers, counters, state machines, etc.
435 * Transactions will be terminated immediately, and operational registers
436 * will be set to their defaults.
438 int mtktest_xhci_reset(struct xhci_hcd
*xhci
)
444 state
= xhci_readl(xhci
, &xhci
->op_regs
->status
);
445 if ((state
& STS_HALT
) == 0) {
446 xhci_warn(xhci
, "Host controller not halted, aborting reset.\n");
450 xhci_dbg(xhci
, "// Reset the HC\n");
451 command
= xhci_readl(xhci
, &xhci
->op_regs
->command
);
452 command
|= CMD_RESET
;
453 xhci_writel(xhci
, command
, &xhci
->op_regs
->command
);
454 /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
455 xhci_to_hcd(xhci
)->state
= HC_STATE_HALT
;
457 ret
= handshake(xhci
, &xhci
->op_regs
->command
,
458 CMD_RESET
, 0, 250 * 1000);
462 xhci_dbg(xhci
, "Wait for controller to be ready for doorbell rings\n");
464 * xHCI cannot write to any doorbells or operational registers other
465 * than status until the "Controller Not Ready" flag is cleared.
467 return handshake(xhci
, &xhci
->op_regs
->status
, STS_CNR
, 0, 250 * 1000);
471 * Initialize memory for HCD and xHC (one-time init).
473 * Program the PAGESIZE register, initialize the device context array, create
474 * device contexts (?), set up a command ring segment (or two?), create event
475 * ring (one for now).
477 int mtktest_xhci_init(struct usb_hcd
*hcd
)
479 struct xhci_hcd
*xhci
= hcd_to_xhci(hcd
);
482 xhci_dbg(xhci
, "mtktest_xhci_init\n");
483 spin_lock_init(&xhci
->lock
);
485 xhci_dbg(xhci
, "QUIRK: Not clearing Link TRB chain bits.\n");
486 xhci
->quirks
|= XHCI_LINK_TRB_QUIRK
;
488 xhci_dbg(xhci
, "xHCI doesn't need link TRB QUIRK\n");
490 retval
= mtktest_xhci_mem_init(xhci
, GFP_KERNEL
);
491 xhci_dbg(xhci
, "Finished mtktest_xhci_init\n");
496 /*-------------------------------------------------------------------------*/
499 * mtktest_xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
500 * HCDs. Find the index for an endpoint given its descriptor. Use the return
501 * value to right shift 1 for the bitmask.
503 * Index = (epnum * 2) + direction - 1,
504 * where direction = 0 for OUT, 1 for IN.
505 * For control endpoints, the IN index is used (OUT index is unused), so
506 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
/*
 * mtktest_xhci_get_endpoint_index - map an endpoint descriptor to its
 * device-context index.
 *
 * Index = (epnum * 2) + direction - 1, where direction is 0 for OUT and
 * 1 for IN.  Control endpoints use the IN index (the OUT slot is unused),
 * which reduces to epnum * 2.
 */
unsigned int mtktest_xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index = (unsigned int) usb_endpoint_num(desc) * 2;

	if (!usb_endpoint_xfer_control(desc))
		index += (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;

	return index;
}
519 /* Find the flag for this endpoint (for use in the control context). Use the
520 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
/*
 * mtktest_xhci_get_endpoint_flag_from_index - convert a device-context
 * endpoint index into its bitmask flag for the input control context.
 *
 * Bit 0 of the control context is the slot context, so endpoint index 0
 * maps to bit 1, index 1 to bit 2, and so on.
 */
unsigned int mtktest_xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	unsigned int shift = ep_index + 1;

	return 1U << shift;
}
529 /* Compute the last valid endpoint context index. Basically, this is the
530 * endpoint index plus one. For slot contexts with more than valid endpoint,
531 * we find the most significant bit set in the added contexts flags.
532 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
533 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
535 unsigned int mtktest_xhci_last_valid_endpoint(u32 added_ctxs
)
537 return fls(added_ctxs
) - 1;
540 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd
*xhci
,
541 struct xhci_container_ctx
*in_ctx
,
542 struct xhci_container_ctx
*out_ctx
,
543 u32 add_flags
, u32 drop_flags
)
545 struct xhci_input_control_ctx
*ctrl_ctx
;
546 ctrl_ctx
= mtktest_xhci_get_input_control_ctx(xhci
, in_ctx
);
547 ctrl_ctx
->add_flags
= add_flags
;
548 ctrl_ctx
->drop_flags
= drop_flags
;
549 mtktest_xhci_slot_copy(xhci
, in_ctx
, out_ctx
);
550 ctrl_ctx
->add_flags
|= SLOT_FLAG
;
552 xhci_dbg(xhci
, "Input Context:\n");
553 mtktest_xhci_dbg_ctx(xhci
, in_ctx
, mtktest_xhci_last_valid_endpoint(add_flags
));
556 void xhci_setup_input_ctx_for_quirk(struct xhci_hcd
*xhci
,
557 unsigned int slot_id
, unsigned int ep_index
,
558 struct xhci_dequeue_state
*deq_state
)
560 struct xhci_container_ctx
*in_ctx
;
561 struct xhci_ep_ctx
*ep_ctx
;
565 mtktest_xhci_endpoint_copy(xhci
, xhci
->devs
[slot_id
]->in_ctx
,
566 xhci
->devs
[slot_id
]->out_ctx
, ep_index
);
567 in_ctx
= xhci
->devs
[slot_id
]->in_ctx
;
568 ep_ctx
= mtktest_xhci_get_ep_ctx(xhci
, in_ctx
, ep_index
);
569 addr
= mtktest_xhci_trb_virt_to_dma(deq_state
->new_deq_seg
,
570 deq_state
->new_deq_ptr
);
572 xhci_warn(xhci
, "WARN Cannot submit config ep after "
573 "reset ep command\n");
574 xhci_warn(xhci
, "WARN deq seg = %p, deq ptr = %p\n",
575 deq_state
->new_deq_seg
,
576 deq_state
->new_deq_ptr
);
579 ep_ctx
->deq
= addr
| deq_state
->new_cycle_state
;
581 added_ctxs
= mtktest_xhci_get_endpoint_flag_from_index(ep_index
);
582 xhci_setup_input_ctx_for_config_ep(xhci
, xhci
->devs
[slot_id
]->in_ctx
,
583 xhci
->devs
[slot_id
]->out_ctx
, added_ctxs
, added_ctxs
);
586 /* hc interface non-used functions */
587 int mtktest_xhci_mtk_run(struct usb_hcd
*hcd
){
588 printk("mtktest_xhci_mtk_run is called\n");
591 struct xhci_hcd
*xhci
= hcd_to_xhci(hcd
);
592 void (*doorbell
)(struct xhci_hcd
*) = NULL
;
594 hcd
->uses_new_polling
= 1;
597 xhci_dbg(xhci
, "mtktest_xhci_run\n");
598 #if 0 /* FIXME: MSI not setup yet */
599 /* Do this at the very last minute */
600 ret
= xhci_setup_msix(xhci
);
606 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
607 init_timer(&xhci
->event_ring_timer
);
608 xhci
->event_ring_timer
.data
= (unsigned long) xhci
;
609 xhci
->event_ring_timer
.function
= xhci_event_ring_work
;
610 /* Poll the event ring */
611 xhci
->event_ring_timer
.expires
= jiffies
+ POLL_TIMEOUT
* HZ
;
613 xhci_dbg(xhci
, "Setting event ring polling timer\n");
614 add_timer(&xhci
->event_ring_timer
);
617 xhci_dbg(xhci
, "Command ring memory map follows:\n");
618 mtktest_xhci_debug_ring(xhci
, xhci
->cmd_ring
);
619 mtktest_xhci_dbg_ring_ptrs(xhci
, xhci
->cmd_ring
);
620 mtktest_xhci_dbg_cmd_ptrs(xhci
);
622 xhci_dbg(xhci
, "ERST memory map follows:\n");
623 mtktest_xhci_dbg_erst(xhci
, &xhci
->erst
);
624 xhci_dbg(xhci
, "Event ring:\n");
625 mtktest_xhci_debug_ring(xhci
, xhci
->event_ring
);
626 mtktest_xhci_dbg_ring_ptrs(xhci
, xhci
->event_ring
);
627 temp_64
= xhci_read_64(xhci
, &xhci
->ir_set
->erst_dequeue
);
628 temp_64
&= ~ERST_PTR_MASK
;
629 xhci_dbg(xhci
, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64
);
631 xhci_dbg(xhci
, "// Set the interrupt modulation register\n");
632 temp
= xhci_readl(xhci
, &xhci
->ir_set
->irq_control
);
633 temp
&= ~ER_IRQ_INTERVAL_MASK
;
635 xhci_writel(xhci
, temp
, &xhci
->ir_set
->irq_control
);
637 /* Set the HCD state before we enable the irqs */
638 hcd
->state
= HC_STATE_RUNNING
;
639 temp
= xhci_readl(xhci
, &xhci
->op_regs
->command
);
641 xhci_dbg(xhci
, "// Enable interrupts, cmd = 0x%x.\n",
643 xhci_writel(xhci
, temp
, &xhci
->op_regs
->command
);
645 temp
= xhci_readl(xhci
, &xhci
->ir_set
->irq_pending
);
646 xhci_dbg(xhci
, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
647 xhci
->ir_set
, (unsigned int) ER_IRQ_ENABLE(temp
));
648 xhci_writel(xhci
, ER_IRQ_ENABLE(temp
),
649 &xhci
->ir_set
->irq_pending
);
650 mtktest_xhci_print_ir_set(xhci
, xhci
->ir_set
, 0);
652 if (NUM_TEST_NOOPS
> 0)
653 doorbell
= mtktest_xhci_setup_one_noop(xhci
);
655 if (xhci
->quirks
& XHCI_NEC_HOST
)
656 mtktest_xhci_queue_address_device(xhci
, 0, 0, 0,
657 TRB_TYPE(TRB_NEC_GET_FW
));
659 if (xhci_start(xhci
)) {
660 mtktest_xhci_halt(xhci
);
664 xhci_dbg(xhci
, "// @%p = 0x%x\n", &xhci
->op_regs
->command
, temp
);
668 if (xhci
->quirks
& XHCI_NEC_HOST
)
669 mtktest_xhci_ring_cmd_db(xhci
);
673 mtktest_mtk_xhci_set(xhci
);
674 mtktest_mtk_xhci_eint_iddig_init();
681 mtktest_enableXhciAllPortPower(xhci
);
688 mtktest_disableAllClockPower();
689 xhci_dbg(xhci
, "Finished mtktest_xhci_run\n");
693 void mtktest_xhci_mtk_stop(struct usb_hcd
*hcd
){
694 printk("mtktest_xhci_mtk_stop is called\n");
696 struct xhci_hcd
*xhci
= hcd_to_xhci(hcd
);
698 #ifdef TEST_OTG_IDDIG // USBIF
699 mtktest_disableXhciAllPortPower(xhci
);
700 mtktest_mtk_xhci_eint_iddig_deinit();
704 spin_lock_irq(&xhci
->lock
);
705 mtktest_xhci_halt(xhci
);
706 mtktest_xhci_reset(xhci
);
707 spin_unlock_irq(&xhci
->lock
);
709 #if 0 /* No MSI yet */
710 xhci_cleanup_msix(xhci
);
712 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
713 /* Tell the event ring poll function not to reschedule */
715 del_timer_sync(&xhci
->event_ring_timer
);
718 xhci_dbg(xhci
, "// Disabling event ring interrupts\n");
719 temp
= xhci_readl(xhci
, &xhci
->op_regs
->status
);
720 xhci_writel(xhci
, temp
& ~STS_EINT
, &xhci
->op_regs
->status
);
721 temp
= xhci_readl(xhci
, &xhci
->ir_set
->irq_pending
);
722 xhci_writel(xhci
, ER_IRQ_DISABLE(temp
),
723 &xhci
->ir_set
->irq_pending
);
724 mtktest_xhci_print_ir_set(xhci
, xhci
->ir_set
, 0);
726 xhci_dbg(xhci
, "cleaning up memory\n");
727 mtktest_xhci_mem_cleanup(xhci
);
728 xhci_dbg(xhci
, "mtktest_xhci_stop completed - status = %x\n",
729 xhci_readl(xhci
, &xhci
->op_regs
->status
));
733 void mtktest_xhci_mtk_shutdown(struct usb_hcd
*hcd
){
734 printk("mtktest_xhci_mtk_shutdown is called\n");
735 struct xhci_hcd
*xhci
= hcd_to_xhci(hcd
);
737 spin_lock_irq(&xhci
->lock
);
738 mtktest_xhci_halt(xhci
);
739 spin_unlock_irq(&xhci
->lock
);
742 xhci_cleanup_msix(xhci
);
745 xhci_dbg(xhci
, "mtktest_xhci_shutdown completed - status = %x\n",
746 xhci_readl(xhci
, &xhci
->op_regs
->status
));
749 int mtktest_xhci_mtk_urb_enqueue(struct usb_hcd
*hcd
, struct urb
*urb
, gfp_t mem_flags
){
750 printk("mtktest_xhci_mtk_urb_enqueue is called\n");
753 int mtktest_xhci_mtk_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
){
754 printk("mtktest_xhci_mtk_urb_dequeue is called\n");
757 int mtktest_xhci_mtk_alloc_dev(struct usb_hcd
*hcd
, struct usb_device
*udev
){
758 printk("mtktest_xhci_mtk_alloc_dev is called\n");
761 void mtktest_xhci_mtk_free_dev(struct usb_hcd
*hcd
, struct usb_device
*udev
){
762 printk("mtktest_xhci_mtk_free_dev is called\n");
765 int mtktest_xhci_mtk_alloc_streams(struct usb_hcd
*hcd
, struct usb_device
*udev
766 , struct usb_host_endpoint
**eps
, unsigned int num_eps
,
767 unsigned int num_streams
, gfp_t mem_flags
){
768 printk("mtktest_xhci_mtk_alloc_streams is called\n");
771 int mtktest_xhci_mtk_free_streams(struct usb_hcd
*hcd
, struct usb_device
*udev
,
772 struct usb_host_endpoint
**eps
, unsigned int num_eps
,
774 printk("mtktest_xhci_mtk_free_streams is called\n");
777 int mtktest_xhci_mtk_add_endpoint(struct usb_hcd
*hcd
, struct usb_device
*udev
, struct usb_host_endpoint
*ep
){
778 struct xhci_hcd
*xhci
;
779 struct xhci_container_ctx
*in_ctx
, *out_ctx
;
780 unsigned int ep_index
;
781 struct xhci_ep_ctx
*ep_ctx
;
782 struct xhci_slot_ctx
*slot_ctx
;
783 struct xhci_input_control_ctx
*ctrl_ctx
;
785 unsigned int last_ctx
;
786 u32 new_add_flags
, new_drop_flags
, new_slot_info
;
789 ret
= xhci_check_args(hcd
, udev
, ep
, 1, __func__
);
791 /* So we won't queue a reset ep command for a root hub */
796 xhci
= hcd_to_xhci(hcd
);
798 added_ctxs
= mtktest_xhci_get_endpoint_flag(&ep
->desc
);
799 last_ctx
= mtktest_xhci_last_valid_endpoint(added_ctxs
);
800 if (added_ctxs
== SLOT_FLAG
|| added_ctxs
== EP0_FLAG
) {
801 /* FIXME when we have to issue an evaluate endpoint command to
802 * deal with ep0 max packet size changing once we get the
805 xhci_dbg(xhci
, "xHCI %s - can't add slot or ep 0 %#x\n",
806 __func__
, added_ctxs
);
810 if (!xhci
->devs
|| !xhci
->devs
[udev
->slot_id
]) {
811 xhci_warn(xhci
, "xHCI %s called with unaddressed device\n",
816 in_ctx
= xhci
->devs
[udev
->slot_id
]->in_ctx
;
817 out_ctx
= xhci
->devs
[udev
->slot_id
]->out_ctx
;
818 ctrl_ctx
= mtktest_xhci_get_input_control_ctx(xhci
, in_ctx
);
819 ep_index
= mtktest_xhci_get_endpoint_index(&ep
->desc
);
820 ep_ctx
= mtktest_xhci_get_ep_ctx(xhci
, out_ctx
, ep_index
);
821 /* If the HCD has already noted the endpoint is enabled,
822 * ignore this request.
824 if (ctrl_ctx
->add_flags
& mtktest_xhci_get_endpoint_flag(&ep
->desc
)) {
825 xhci_warn(xhci
, "xHCI %s called with enabled ep %p\n",
831 * Configuration and alternate setting changes must be done in
832 * process context, not interrupt context (or so documenation
833 * for usb_set_interface() and usb_set_configuration() claim).
835 if (mtktest_xhci_endpoint_init(xhci
, xhci
->devs
[udev
->slot_id
],
836 udev
, ep
, GFP_NOIO
) < 0) {
837 dev_dbg(&udev
->dev
, "%s - could not initialize ep %#x\n",
838 __func__
, ep
->desc
.bEndpointAddress
);
842 ctrl_ctx
->add_flags
|= added_ctxs
;
843 new_add_flags
= ctrl_ctx
->add_flags
;
845 /* If xhci_endpoint_disable() was called for this endpoint, but the
846 * xHC hasn't been notified yet through the check_bandwidth() call,
847 * this re-adds a new state for the endpoint from the new endpoint
848 * descriptors. We must drop and re-add this endpoint, so we leave the
851 new_drop_flags
= ctrl_ctx
->drop_flags
;
853 slot_ctx
= mtktest_xhci_get_slot_ctx(xhci
, in_ctx
);
854 /* Update the last valid endpoint context, if we just added one past */
855 if ((slot_ctx
->dev_info
& LAST_CTX_MASK
) < LAST_CTX(last_ctx
)) {
856 slot_ctx
->dev_info
&= ~LAST_CTX_MASK
;
857 slot_ctx
->dev_info
|= LAST_CTX(last_ctx
);
859 new_slot_info
= slot_ctx
->dev_info
;
861 /* Store the usb_device pointer for later use */
864 xhci_dbg(xhci
, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
865 (unsigned int) ep
->desc
.bEndpointAddress
,
867 (unsigned int) new_drop_flags
,
868 (unsigned int) new_add_flags
);
869 xhci_dbg(xhci
, "new slot context 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n"
870 , slot_ctx
->dev_info
, slot_ctx
->dev_info2
, slot_ctx
->tt_info
, slot_ctx
->dev_state
871 , slot_ctx
->reserved
[0], slot_ctx
->reserved
[1], slot_ctx
->reserved
[2], slot_ctx
->reserved
[3]);
875 int mtktest_xhci_mtk_drop_endpoint(struct usb_hcd
*hcd
, struct usb_device
*udev
876 , struct usb_host_endpoint
*ep
){
877 struct xhci_hcd
*xhci
;
878 struct xhci_container_ctx
*in_ctx
, *out_ctx
;
879 struct xhci_input_control_ctx
*ctrl_ctx
;
880 struct xhci_slot_ctx
*slot_ctx
;
881 unsigned int last_ctx
;
882 unsigned int ep_index
;
883 struct xhci_ep_ctx
*ep_ctx
;
885 u32 new_add_flags
, new_drop_flags
, new_slot_info
;
888 xhci
= hcd_to_xhci(hcd
);
889 if (xhci
->xhc_state
& XHCI_STATE_DYING
)
891 xhci_dbg(xhci
, "%s called for udev %p\n", __func__
, udev
);
892 drop_flag
= mtktest_xhci_get_endpoint_flag(&ep
->desc
);
893 if (drop_flag
== SLOT_FLAG
|| drop_flag
== EP0_FLAG
) {
894 xhci_dbg(xhci
, "xHCI %s - can't drop slot or ep 0 %#x\n",
895 __func__
, drop_flag
);
899 in_ctx
= xhci
->devs
[udev
->slot_id
]->in_ctx
;
900 out_ctx
= xhci
->devs
[udev
->slot_id
]->out_ctx
;
901 ctrl_ctx
= mtktest_xhci_get_input_control_ctx(xhci
, in_ctx
);
902 ep_index
= mtktest_xhci_get_endpoint_index(&ep
->desc
);
903 ep_ctx
= mtktest_xhci_get_ep_ctx(xhci
, out_ctx
, ep_index
);
905 /* If the HC already knows the endpoint is disabled,
906 * or the HCD has noted it is disabled, ignore this request
908 if ((le32_to_cpu(ep_ctx
->ep_info
) & EP_STATE_MASK
) ==
910 le32_to_cpu(ctrl_ctx
->drop_flags
) &
911 mtktest_xhci_get_endpoint_flag(&ep
->desc
)) {
912 xhci_warn(xhci
, "xHCI %s called with disabled ep %p\n",
917 ctrl_ctx
->drop_flags
|= cpu_to_le32(drop_flag
);
918 new_drop_flags
= le32_to_cpu(ctrl_ctx
->drop_flags
);
920 ctrl_ctx
->add_flags
&= cpu_to_le32(~drop_flag
);
921 new_add_flags
= le32_to_cpu(ctrl_ctx
->add_flags
);
923 last_ctx
= mtktest_xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx
->add_flags
));
924 slot_ctx
= mtktest_xhci_get_slot_ctx(xhci
, in_ctx
);
925 /* Update the last valid endpoint context, if we deleted the last one */
926 if ((le32_to_cpu(slot_ctx
->dev_info
) & LAST_CTX_MASK
) >
927 LAST_CTX(last_ctx
)) {
928 slot_ctx
->dev_info
&= cpu_to_le32(~LAST_CTX_MASK
);
929 slot_ctx
->dev_info
|= cpu_to_le32(LAST_CTX(last_ctx
));
931 new_slot_info
= le32_to_cpu(slot_ctx
->dev_info
);
933 mtktest_xhci_endpoint_zero(xhci
, xhci
->devs
[udev
->slot_id
], ep
);
935 xhci_dbg(xhci
, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
936 (unsigned int) ep
->desc
.bEndpointAddress
,
938 (unsigned int) new_drop_flags
,
939 (unsigned int) new_add_flags
,
940 (unsigned int) new_slot_info
);
943 void mtktest_xhci_cleanup_stalled_ring(struct xhci_hcd
*xhci
,
944 struct usb_device
*udev
, unsigned int ep_index
)
946 struct xhci_dequeue_state deq_state
;
947 struct xhci_virt_ep
*ep
;
949 xhci_dbg(xhci
, "Cleaning up stalled endpoint ring\n");
950 ep
= &xhci
->devs
[udev
->slot_id
]->eps
[ep_index
];
951 /* We need to move the HW's dequeue pointer past this TD,
952 * or it will attempt to resend it on the next doorbell ring.
954 mtktest_xhci_find_new_dequeue_state(xhci
, udev
->slot_id
,
955 ep_index
, ep
->stopped_stream
, ep
->stopped_td
,
958 /* HW with the reset endpoint quirk will use the saved dequeue state to
959 * issue a configure endpoint command later.
961 if (!(xhci
->quirks
& XHCI_RESET_EP_QUIRK
)) {
962 xhci_dbg(xhci
, "Queueing new dequeue state\n");
963 mtktest_xhci_queue_new_dequeue_state(xhci
, udev
->slot_id
,
964 ep_index
, ep
->stopped_stream
, &deq_state
);
966 /* Better hope no one uses the input context between now and the
967 * reset endpoint completion!
968 * XXX: No idea how this hardware will react when stream rings
971 xhci_dbg(xhci
, "Setting up input context for "
972 "configure endpoint command\n");
973 xhci_setup_input_ctx_for_quirk(xhci
, udev
->slot_id
,
974 ep_index
, &deq_state
);
978 void mtktest_xhci_zero_in_ctx(struct xhci_hcd
*xhci
, struct xhci_virt_device
*virt_dev
)
980 struct xhci_input_control_ctx
*ctrl_ctx
;
981 struct xhci_ep_ctx
*ep_ctx
;
982 struct xhci_slot_ctx
*slot_ctx
;
985 /* When a device's add flag and drop flag are zero, any subsequent
986 * configure endpoint command will leave that endpoint's state
987 * untouched. Make sure we don't leave any old state in the input
990 ctrl_ctx
= mtktest_xhci_get_input_control_ctx(xhci
, virt_dev
->in_ctx
);
991 ctrl_ctx
->drop_flags
= 0;
992 ctrl_ctx
->add_flags
= 0;
993 slot_ctx
= mtktest_xhci_get_slot_ctx(xhci
, virt_dev
->in_ctx
);
994 slot_ctx
->dev_info
&= cpu_to_le32(~LAST_CTX_MASK
);
995 /* Endpoint 0 is always valid */
996 slot_ctx
->dev_info
|= cpu_to_le32(LAST_CTX(1));
997 for (i
= 1; i
< 31; ++i
) {
998 ep_ctx
= mtktest_xhci_get_ep_ctx(xhci
, virt_dev
->in_ctx
, i
);
1000 ep_ctx
->ep_info2
= 0;
1002 ep_ctx
->tx_info
= 0;
1007 void mtktest_xhci_mtk_endpoint_reset(struct usb_hcd
*hcd
, struct usb_host_endpoint
*ep
){
1008 printk("mtktest_xhci_mtk_endpoint_reset is called\n");
1011 int mtktest_xhci_mtk_check_bandwidth(struct usb_hcd
*hcd
, struct usb_device
*udev
){
1012 printk("mtktest_xhci_mtk_check_bandwidth is called\n");
1015 void mtktest_xhci_mtk_reset_bandwidth(struct usb_hcd
*hcd
, struct usb_device
*udev
){
1016 printk("mtktest_xhci_mtk_reset_bandwidth is called\n");
1019 int mtktest_xhci_mtk_address_device(struct usb_hcd
*hcd
, struct usb_device
*udev
){
1020 printk("mtktest_xhci_mtk_address_device is called\n");
1023 int mtktest_xhci_mtk_update_hub_device(struct usb_hcd
*hcd
, struct usb_device
*hdev
,
1024 struct usb_tt
*tt
, gfp_t mem_flags
){
1025 printk("mtktest_xhci_mtk_update_hub_device is called\n");
1028 int mtktest_xhci_mtk_reset_device(struct usb_hcd
*hcd
, struct usb_device
*udev
){
1029 printk("mtktest_xhci_mtk_reset_device is called\n");
1032 int mtktest_xhci_mtk_hub_control(struct usb_hcd
*hcd
, u16 typeReq
, u16 wValue
,
1033 u16 wIndex
, char *buf
, u16 wLength
){
1034 printk("mtktest_xhci_mtk_hub_control is called\n");
1038 int mtktest_xhci_mtk_hub_status_data(struct usb_hcd
*hcd
, char *buf
){
1039 printk("mtktest_xhci_mtk_hub_status_data is called\n");
1043 int mtktest_xhci_mtk_get_frame(struct usb_hcd
*hcd
){
1044 printk("mtktest_xhci_mtk_get_frame is called\n");
1046 #if defined(CONFIG_MTK_LM_MODE)
1047 #define MTK_XHCI_DMA_BIT_MASK DMA_BIT_MASK(64)
1049 #define MTK_XHCI_DMA_BIT_MASK DMA_BIT_MASK(32)
1052 static u64 dummy_mask
= MTK_XHCI_DMA_BIT_MASK
;
1054 static struct platform_device xhci_platform_dev
= {
1058 // .dma_mask = &dummy_mask,
1059 .coherent_dma_mask
= MTK_XHCI_DMA_BIT_MASK
,
1060 .release
= xhci_hcd_release
,
1065 #define U3_MAC_TX_FIFO_WAIT_EMPTY_ADDR 0xf0041144
1067 void setMacFIFOWaitEmptyValue(){
1068 __u32 __iomem
*mac_tx_fifo_wait_empty_addr
;
1069 u32 mac_tx_fifo_wait_empty_value
;
1070 mac_tx_fifo_wait_empty_addr
= U3_MAC_TX_FIFO_WAIT_EMPTY_ADDR
;
1071 mac_tx_fifo_wait_empty_value
= 0x5;
1072 writel(mac_tx_fifo_wait_empty_value
, mac_tx_fifo_wait_empty_addr
);
1077 //initial MAC3 register, should be called after HC reset and before set PP=1 of each port
1078 void mtktest_setInitialReg(){
1079 __u32 __iomem
*addr
;
1083 num_u3_port
= SSUSB_U3_PORT_NUM(readl((void __iomem
*)SSUSB_IP_CAP
));
1085 printk("[OTG_H] mtktest_setInitialReg , num_u3_port = %d\n", num_u3_port
);
1086 // USBIF , we should enable it in real chip
1088 //set MAC reference clock speed
1089 addr
= SSUSB_U3_MAC_BASE
+U3_UX_EXIT_LFPS_TIMING_PAR
;
1091 temp
&= ~(0xff << U3_RX_UX_EXIT_LFPS_REF_OFFSET
);
1092 temp
|= (U3_RX_UX_EXIT_LFPS_REF
<< U3_RX_UX_EXIT_LFPS_REF_OFFSET
);
1094 addr
= SSUSB_U3_MAC_BASE
+U3_REF_CK_PAR
;
1097 temp
|= U3_REF_CK_VAL
;
1101 addr
= SSUSB_U3_SYS_BASE
+U3_TIMING_PULSE_CTRL
;
1104 temp
|= CNT_1US_VALUE
;
1108 addr
= SSUSB_U2_SYS_BASE
+USB20_TIMING_PARAMETER
;
1110 temp
|= TIME_VALUE_1US
;
1113 // USBIF , USBIF , we should enable it in real chip
1115 //set LINK_PM_TIMER=3
1116 addr
= SSUSB_U3_SYS_BASE
+LINK_PM_TIMER
;
1119 temp
|= PM_LC_TIMEOUT_VALUE
;
1126 void mtktest_setLatchSel(){
1127 __u32 __iomem
*latch_sel_addr
;
1128 u32 latch_sel_value
;
1130 if(g_num_u3_port
<= 0)
1133 latch_sel_addr
= U3_PIPE_LATCH_SEL_ADD
;
1134 latch_sel_value
= ((U3_PIPE_LATCH_TX
)<<2) | (U3_PIPE_LATCH_RX
);
1135 writel(latch_sel_value
, latch_sel_addr
);
1138 void mtktest_reinitIP(){
1139 __u32 __iomem
*ip_reset_addr
;
1143 //writel(SSUSB_XHCI_SW_RST, SSUSB_XHCI_RST_CTRL);
1145 //writel(0, SSUSB_XHCI_RST_CTRL);
1148 //enable clock/gating, include re-init IP in IPPC
1149 mtktest_enableAllClockPower();
1150 /* set MAC3 PIPE latch */
1151 mtktest_setLatchSel();
1152 mtktest_mtk_xhci_scheduler_init();
1156 int mtk_xhci_hcd_init(void)
1159 __u32 __iomem
*ip_reset_addr
;
1161 struct platform_device
*pPlatformDev
;
1163 printk(KERN_ERR
"Module Init start!\n");
1167 retval
= platform_driver_register(&xhci_versatile_driver
);
1170 printk(KERN_ERR
"Problem registering platform driver.");
1174 pPlatformDev
= &xhci_platform_dev
;
1175 memset(pPlatformDev
, 0, sizeof(struct platform_device
));
1176 pPlatformDev
->name
= hcd_name
;
1177 pPlatformDev
->id
= -1;
1178 pPlatformDev
->dev
.coherent_dma_mask
= MTK_XHCI_DMA_BIT_MASK
;
1179 pPlatformDev
->dev
.release
= xhci_hcd_release
;
1180 retval
= platform_device_register(&xhci_platform_dev
);
1183 platform_driver_unregister (&xhci_versatile_driver
);
1185 printk(KERN_ERR
"Module Init success!\n");
1186 //mtktest_setInitialReg();
1188 mtktest_mtk_xhci_eint_iddig_init() ;
1190 * Check the compiler generated sizes of structures that must be laid
1191 * out in specific ways for hardware access.
1193 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array
) != 256*32/8);
1194 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx
) != 8*32/8);
1195 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx
) != 8*32/8);
1196 /* xhci_device_control has eight fields, and also
1197 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
1199 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx
) != 4*32/8);
1200 BUILD_BUG_ON(sizeof(union xhci_trb
) != 4*32/8);
1201 BUILD_BUG_ON(sizeof(struct xhci_erst_entry
) != 4*32/8);
1202 BUILD_BUG_ON(sizeof(struct xhci_cap_regs
) != 7*32/8);
1203 BUILD_BUG_ON(sizeof(struct xhci_intr_reg
) != 8*32/8);
1204 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
1205 BUILD_BUG_ON(sizeof(struct xhci_run_regs
) != (8+8*128)*32/8);
1206 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array
) != 256*32/8);
1210 void mtk_xhci_hcd_cleanup(void)
1212 //xhci_unregister_pci();
1215 struct platform_device
*pPlatformDev
;
1217 mtktest_mtk_xhci_eint_iddig_deinit() ;
1219 platform_device_unregister(&xhci_platform_dev
);
1220 platform_driver_unregister(&xhci_versatile_driver
);