struct device *dev = xhci_to_hcd(xhci)->self.controller;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
- xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Allocating %d scratchpad buffers", num_sp);
if (!num_sp)
return 0;
dma_free_coherent(&pdev->dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
- xhci_dbg(xhci, "Freed ERST\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
if (xhci->event_ring)
xhci_ring_free(xhci, xhci->event_ring);
xhci->event_ring = NULL;
- xhci_dbg(xhci, "Freed event ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
- xhci_dbg(xhci, "Freed command ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
list_for_each_entry_safe(cur_cd, next_cd,
&xhci->cancel_cmd_list, cancel_cmd_list) {
list_del(&cur_cd->cancel_cmd_list);
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
- xhci_dbg(xhci, "Freed segment pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
if (xhci->device_pool)
dma_pool_destroy(xhci->device_pool);
xhci->device_pool = NULL;
- xhci_dbg(xhci, "Freed device context pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
if (xhci->small_streams_pool)
dma_pool_destroy(xhci->small_streams_pool);
xhci->small_streams_pool = NULL;
- xhci_dbg(xhci, "Freed small stream array pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed small stream array pool");
if (xhci->medium_streams_pool)
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
- xhci_dbg(xhci, "Freed medium stream array pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed medium stream array pool");
if (xhci->dcbaa)
dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
* there might be more events to service.
*/
temp &= ~ERST_EHB;
- xhci_dbg(xhci, "// Write event ring dequeue pointer, "
- "preserving EHB bit\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Write event ring dequeue pointer, "
+ "preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
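
For contrast with the hunk above, which masks ERST_EHB out of the value it writes back (EHB is write-1-to-clear, and clearing it here would tell the controller the event ring has been fully serviced while more events may still be pending), the interrupt path does the opposite once it has drained the ring. A minimal sketch of that complementary update, built only from the register helpers and masks already visible in this patch; the function name is illustrative, not taken from the driver:

/* Sketch only: advance the hardware dequeue pointer and clear EHB
 * (write-1-to-clear) once every pending event has been handled.
 * Assumes the xhci.h definitions used elsewhere in this patch.
 */
static void xhci_ack_event_ring_sketch(struct xhci_hcd *xhci, u64 deq)
{
	u64 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);

	temp_64 &= ERST_PTR_MASK;		/* keep the low flag bits */
	temp_64 |= deq & (u64) ~ERST_PTR_MASK;	/* install the new pointer */
	temp_64 |= ERST_EHB;			/* writing 1 clears EHB */
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
}
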
temp = xhci_readl(xhci, addr + 2);
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
- xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
- "count = %u, revision = 0x%x\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Ext Cap %p, port offset = %u, "
+ "count = %u, revision = 0x%x",
addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
/* Check the host's USB2 LPM capability */
if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
(temp & XHCI_L1C)) {
- xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 0.96: support USB2 software lpm");
xhci->sw_lpm_support = 1;
}
if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
- xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 1.0: support USB2 software lpm");
xhci->sw_lpm_support = 1;
if (temp & XHCI_HLC) {
- xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 1.0: support USB2 hardware lpm");
xhci->hw_lpm_support = 1;
}
}
xhci_warn(xhci, "No ports on the roothubs?\n");
return -ENODEV;
}
- xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Found %u USB 2.0 ports and %u USB 3.0 ports.",
xhci->num_usb2_ports, xhci->num_usb3_ports);
/* Place limits on the number of roothub ports so that the hub
* descriptors aren't longer than the USB core will allocate.
*/
if (xhci->num_usb3_ports > 15) {
- xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Limiting USB 3.0 roothub ports to 15.");
xhci->num_usb3_ports = 15;
}
if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
- xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Limiting USB 2.0 roothub ports to %u.",
USB_MAXCHILDREN);
xhci->num_usb2_ports = USB_MAXCHILDREN;
}
xhci->usb2_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
- xhci_dbg(xhci, "USB 2.0 port at index %u, "
- "addr = %p\n", i,
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "USB 2.0 port at index %u, "
+ "addr = %p", i,
xhci->usb2_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb2_ports)
xhci->usb3_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
- xhci_dbg(xhci, "USB 3.0 port at index %u, "
- "addr = %p\n", i,
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "USB 3.0 port at index %u, "
+ "addr = %p", i,
xhci->usb3_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb3_ports)
INIT_LIST_HEAD(&xhci->cancel_cmd_list);
page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
- xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Supported page size register = 0x%x", page_size);
for (i = 0; i < 16; i++) {
if ((0x1 & page_size) != 0)
break;
page_size = page_size >> 1;
}
if (i < 16)
- xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Supported page size of %iK", (1 << (i+12)) / 1024);
else
xhci_warn(xhci, "WARN: no supported page size\n");
/* Use 4K pages, since that's common and the minimum the HC supports */
xhci->page_shift = 12;
xhci->page_size = 1 << xhci->page_shift;
- xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "HCD page size set to %iK", xhci->page_size / 1024);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
- xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
- (unsigned int) val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// xHC can handle at most %d device slots.", val);
val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
- xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
- (unsigned int) val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting Max device slots reg = 0x%x.", val);
xhci_writel(xhci, val, &xhci->op_regs->config_reg);
/*
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
xhci->dcbaa->dma = dma;
- xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
- xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
- xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Allocated command ring at %p", xhci->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
- xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting command ring address to 0x%x", val);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
val = xhci_readl(xhci, &xhci->cap_regs->db_off);
val &= DBOFF_MASK;
- xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
- " from cap regs base addr\n", val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Doorbell array is located at offset 0x%x"
+ " from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
- xhci_dbg(xhci, "// Allocating event ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
flags);
if (!xhci->event_ring)
GFP_KERNEL);
if (!xhci->erst.entries)
goto fail;
- xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Allocated event ring segment table at 0x%llx",
(unsigned long long)dma);
memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
xhci->erst.num_entries = ERST_NUM_SEGS;
xhci->erst.erst_dma_addr = dma;
- xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
xhci->erst.num_entries,
xhci->erst.entries,
(unsigned long long)xhci->erst.erst_dma_addr);
val = xhci_readl(xhci, &xhci->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
- xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Write ERST size = %i to ir_set 0 (some bits preserved)",
val);
xhci_writel(xhci, val, &xhci->ir_set->erst_size);
- xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set ERST entries to point to event ring.");
/* set the segment table base address */
- xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
- xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Wrote ERST address to ir_set 0.");
xhci_print_ir_set(xhci, 0);
/*
int xhci_halt(struct xhci_hcd *xhci)
{
int ret;
- xhci_dbg(xhci, "// Halt the HC\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
xhci_quiesce(xhci);
ret = xhci_handshake(xhci, &xhci->op_regs->status,
temp = xhci_readl(xhci, &xhci->op_regs->command);
temp |= (CMD_RUN);
- xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
temp);
xhci_writel(xhci, temp, &xhci->op_regs->command);
return 0;
}
- xhci_dbg(xhci, "// Reset the HC\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
command = xhci_readl(xhci, &xhci->op_regs->command);
command |= CMD_RESET;
xhci_writel(xhci, command, &xhci->op_regs->command);
if (ret)
return ret;
- xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Wait for controller to be ready for doorbell rings");
/*
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
ret = pci_enable_msi(pdev);
if (ret) {
- xhci_dbg(xhci, "failed to allocate MSI entry\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "failed to allocate MSI entry");
return ret;
}
ret = request_irq(pdev->irq, xhci_msi_irq,
0, "xhci_hcd", xhci_to_hcd(xhci));
if (ret) {
- xhci_dbg(xhci, "disable MSI interrupt\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "disable MSI interrupt");
pci_disable_msi(pdev);
}
ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
- xhci_dbg(xhci, "Failed to enable MSI-X\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Failed to enable MSI-X");
goto free_entries;
}
return ret;
disable_msix:
- xhci_dbg(xhci, "disable MSI-X interrupt\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
xhci_free_irq(xhci);
pci_disable_msix(pdev);
free_entries:
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval = 0;
- xhci_dbg(xhci, "xhci_init\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
spin_lock_init(&xhci->lock);
if (xhci->hci_version == 0x95 && link_quirk) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Not clearing Link TRB chain bits.");
xhci->quirks |= XHCI_LINK_TRB_QUIRK;
} else {
- xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI doesn't need link TRB QUIRK");
}
retval = xhci_mem_init(xhci, GFP_KERNEL);
- xhci_dbg(xhci, "Finished xhci_init\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
/* Initializing Compliance Mode Recovery Data If Needed */
if (xhci_compliance_mode_recovery_timer_quirk_check()) {
if (xhci->quirks & XHCI_NEC_HOST)
xhci_ring_cmd_db(xhci);
- xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Finished xhci_run for USB3 roothub");
return 0;
}
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
- xhci_dbg(xhci, "xhci_run\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
ret = xhci_try_enable_msi(hcd);
if (ret)
xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
- xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
- xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set the interrupt modulation register");
temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (u32) 160;
/* Set the HCD state before we enable the irqs */
temp = xhci_readl(xhci, &xhci->op_regs->command);
temp |= (CMD_EIE);
- xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
- temp);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Enable interrupts, cmd = 0x%x.", temp);
xhci_writel(xhci, temp, &xhci->op_regs->command);
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
xhci_writel(xhci, ER_IRQ_ENABLE(temp),
&xhci->ir_set->irq_pending);
xhci_queue_vendor_command(xhci, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
- xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Finished xhci_run for USB2 roothub");
return 0;
}
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_dev_put();
- xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Disabling event ring interrupts");
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
&xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
- xhci_dbg(xhci, "cleaning up memory\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
- xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
- xhci_readl(xhci, &xhci->op_regs->status));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xhci_stop completed - status = %x",
+ xhci_readl(xhci, &xhci->op_regs->status));
}
/*
xhci_cleanup_msix(xhci);
- xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
- xhci_readl(xhci, &xhci->op_regs->status));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xhci_shutdown completed - status = %x",
+ xhci_readl(xhci, &xhci->op_regs->status));
}
#ifdef CONFIG_PM
xhci->cmd_ring->dequeue) &
(u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
- xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting command ring address to 0x%llx",
(long unsigned long) val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
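
Every conversion above funnels the old debug string through xhci_dbg_trace(), which keeps the dev_dbg output and additionally fires the tracepoint passed in (here always trace_xhci_dbg_init). Its shape is roughly the following; a sketch reconstructed from how the call sites use it, assuming the usual va_format-based xhci_log_msg event class rather than quoting the driver verbatim:

/* Sketch of the helper the converted call sites rely on.  Assumes an
 * xhci_log_msg trace event class whose events (such as xhci_dbg_init)
 * take a struct va_format, as in drivers/usb/host/xhci-trace.h.
 */
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	xhci_dbg(xhci, "%pV\n", &vaf);	/* unchanged dmesg output */
	trace(&vaf);			/* plus a trace event */
	va_end(args);
}

With this in place, the same messages can be captured at run time by enabling the corresponding event under the xhci-hcd trace system (typically events/xhci-hcd/xhci_dbg_init in tracefs) instead of rebuilding with extra debug output.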