}
#endif
- schedule_work(&musb->irq_work);
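+ /* a delay of 0 keeps the previous immediate scheduling behaviour */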
+ schedule_delayed_work(&musb->irq_work, 0);
return handled;
}
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_INVALID_VBUS_91:
- if (!musb->session && !musb->quirk_invalid_vbus) {
- musb->quirk_invalid_vbus = true;
+ if (musb->quirk_retries) {
+ musb->quirk_retries--;
musb_dbg(musb,
- "First invalid vbus, assume no session");
+ "Poll devctl on invalid vbus, assume no session");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+
return;
}
- break;
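+ /* fall through */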
case MUSB_QUIRK_A_DISCONNECT_19:
+ if (musb->quirk_retries) {
+ musb->quirk_retries--;
+ musb_dbg(musb,
+ "Poll devctl on possible host mode disconnect");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+
+ return;
+ }
if (!musb->session)
break;
musb_dbg(musb, "Allow PM on possible host mode disconnect");
if (error < 0)
dev_err(musb->controller, "Could not enable: %i\n",
error);
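+ /* rearm devctl quirk polling for the next state change */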
+ musb->quirk_retries = 3;
} else {
musb_dbg(musb, "Allow PM with no session: %02x", devctl);
- musb->quirk_invalid_vbus = false;
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
- struct musb *musb = container_of(data, struct musb, irq_work);
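+ /* the callback receives the work_struct embedded in the delayed_work */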
+ struct musb *musb = container_of(data, struct musb, irq_work.work);
musb_pm_runtime_check_session(musb);
musb_generic_disable(musb);
/* Init IRQ workqueue before request_irq */
- INIT_WORK(&musb->irq_work, musb_irq_work);
+ INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
musb_host_cleanup(musb);
fail3:
- cancel_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
if (musb->dma_controller)
*/
musb_exit_debugfs(musb);
- cancel_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
pm_runtime_get_sync(musb->controller);
struct musb_context_registers context;
irqreturn_t (*isr)(int, void *);
- struct work_struct irq_work;
+ struct delayed_work irq_work;
struct delayed_work deassert_reset_work;
struct delayed_work finish_resume_work;
struct delayed_work gadget_work;
int port_mode; /* MUSB_PORT_MODE_* */
bool session;
- bool quirk_invalid_vbus;
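+ /* remaining devctl poll attempts for quirky state transitions */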
+ unsigned long quirk_retries;
bool is_host;
int a_wait_bcon; /* VBUS timeout in msecs */
musb_ep->dma ? "dma, " : "",
musb_ep->packet_sz);
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
fail:
spin_unlock_irqrestore(&musb->lock, flags);
musb_ep->desc = NULL;
musb_ep->end_point.desc = NULL;
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
spin_unlock_irqrestore(&(musb->lock), flags);
*/
/* Force check of devctl register for PM runtime */
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
idle_timeout = jiffies + (1 * HZ);
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
} else /* A-dev state machine */ {
dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
break;
}
}
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
return idle_timeout;
}
musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
if (reg & ~TUSB_PRCM_WNORCS) {
musb->is_active = 1;
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
}
dev_dbg(musb->controller, "wake %sactive %02x\n",
musb->is_active ? "" : "in", reg);