return IRQ_NONE; /* not our interrupt */
}
- if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
+ if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
+ vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
enic_notify_check(enic);
+ }
if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
+ vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
enic_log_q_error(enic);
/* schedule recovery from WQ/RQ error */
schedule_work(&enic->reset);
{
struct enic *enic = data;
+ vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);
+
enic_log_q_error(enic);
/* schedule recovery from WQ/RQ error */
{
struct enic *enic = data;
+ vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
enic_notify_check(enic);
- vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);
return IRQ_HANDLED;
}
vlan_tag_insert, vlan_tag);
}
-/* netif_tx_lock held, process context with BHs disabled */
+/* netif_tx_lock held, process context with BHs disabled, or BH */
static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
lro_flush_all(&enic->lro_mgr);
napi_complete(napi);
- vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
+ vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
}
return rq_work_done;
vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
- /* Accumulate intr event credits for this polling
+ /* Return intr event credits for this polling
* cycle. An intr event is the completion of a
- * a WQ or RQ packet.
+ * RQ packet.
*/
vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->int_credits);
+}
+
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+ unsigned int credits = vnic_intr_credits(intr);
+ int unmask = 1;
+ int reset_timer = 1;
+
+ vnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
+
static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
{
/* read PBA without clearing */
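
As a rough illustration of why returning all credits makes the explicit unmask calls unnecessary: a credit return is a single write to the per-interrupt int_credit_return register, carrying the credit count plus unmask and reset-timer flags, so vnic_intr_return_all_credits() with unmask = 1 both acknowledges the pending events and re-enables the interrupt in one MMIO write. The field width and bit positions below are assumptions for illustration only, not taken from this excerpt.

/* Minimal sketch of the encoding behind the iowrite32() seen above.
 * The 16-bit credit field and the unmask (bit 16) / reset-timer (bit 17)
 * positions are assumed for illustration, not quoted from the patch.
 */
static inline void example_intr_return_credits(struct vnic_intr *intr,
	unsigned int credits, int unmask, int reset_timer)
{
	u32 int_credit_return = (credits & 0xffff) |
		(unmask ? (1 << 16) : 0) |
		(reset_timer ? (1 << 17) : 0);

	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}
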