enic: support skb->xmit_more
author Govindarajulu Varadarajan <_govind@gmx.com>
Wed, 19 Nov 2014 07:29:32 +0000 (12:59 +0530)
committer David S. Miller <davem@davemloft.net>
Fri, 21 Nov 2014 17:17:54 +0000 (12:17 -0500)
Write posted_index (the tx doorbell) only when skb->xmit_more is 0 or the tx queue has been stopped, so a burst of packets costs a single doorbell write instead of one per packet.

v2:
use txq_map instead of skb_get_queue_mapping(skb)

Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
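
Background on the pattern this patch adopts: when the stack already has more packets queued for the same tx queue, it sets skb->xmit_more so the driver may defer the (expensive) doorbell MMIO write and issue it once for the whole burst. A minimal sketch of that pattern, not enic code; my_xmit(), my_post_descriptors() and my_ring_doorbell() are hypothetical helpers:

	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		my_post_descriptors(skb);	/* fill the tx ring; no MMIO yet */

		/* Ring the doorbell only at the end of a burst. A stopped
		 * queue must be flushed immediately: it will receive no
		 * further xmit calls, so a deferred doorbell would never
		 * be written.
		 */
		if (!skb->xmit_more || netif_xmit_stopped(txq))
			my_ring_doorbell();	/* one posted_index write per burst */

		return NETDEV_TX_OK;
	}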
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_wq.h

diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 5afe360c7e89b3a0d0cc59494498300995fe51ab..b9cda2fc5ae8df9a46de116402b2315987549622 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -533,6 +533,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        struct vnic_wq *wq;
        unsigned long flags;
        unsigned int txq_map;
+       struct netdev_queue *txq;
 
        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
@@ -541,6 +542,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
        txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
        wq = &enic->wq[txq_map];
+       txq = netdev_get_tx_queue(netdev, txq_map);
 
        /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
         * which is very likely.  In the off chance it's going to take
@@ -558,7 +560,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
        if (vnic_wq_desc_avail(wq) <
            skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-               netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+               netif_tx_stop_queue(txq);
                /* This is a hard error, log it */
                netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
                spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
@@ -568,7 +570,9 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        enic_queue_wq_skb(enic, wq, skb);
 
        if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-               netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+               netif_tx_stop_queue(txq);
+       if (!skb->xmit_more || netif_xmit_stopped(txq))
+               vnic_wq_doorbell(wq);
 
        spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
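Note the ordering in the hunks above: the ring-full check that calls netif_tx_stop_queue() runs before the doorbell condition, so netif_xmit_stopped(txq) catches exactly the case where the stack will send no further packets to this queue. Without that test, descriptors posted with xmit_more set could sit in the ring, invisible to the hardware, until the queue is woken.
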
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 2c6c70804a39edb475d4c97e1f3e617aad387ed4..816f1ad6072f142b7e6ac19e372fd6338f2a6efb 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -104,6 +104,17 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
        return wq->to_use->desc;
 }
 
+static inline void vnic_wq_doorbell(struct vnic_wq *wq)
+{
+       /* Adding write memory barrier prevents compiler and/or CPU
+        * reordering, thus avoiding descriptor posting before
+        * descriptor is initialized. Otherwise, hardware can read
+        * stale descriptor fields.
+        */
+       wmb();
+       iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
+}
+
 static inline void vnic_wq_post(struct vnic_wq *wq,
        void *os_buf, dma_addr_t dma_addr,
        unsigned int len, int sop, int eop,
@@ -122,15 +133,6 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
        buf->wr_id = wrid;
 
        buf = buf->next;
-       if (eop) {
-               /* Adding write memory barrier prevents compiler and/or CPU
-                * reordering, thus avoiding descriptor posting before
-                * descriptor is initialized. Otherwise, hardware can read
-                * stale descriptor fields.
-                */
-               wmb();
-               iowrite32(buf->index, &wq->ctrl->posted_index);
-       }
        wq->to_use = buf;
 
        wq->ring.desc_avail -= desc_skip_cnt;
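
The doorbell value is unchanged by the move: the deleted code advanced buf (buf = buf->next) before the iowrite32, so posted_index was the index of the first not-yet-used descriptor, and that is exactly wq->to_use->index after vnic_wq_post() returns. Keeping the wmb() next to the iowrite32 in vnic_wq_doorbell() preserves the required ordering: all descriptor stores must be visible to the device before the new posted_index is. A hypothetical two-packet burst (descriptor numbers invented for illustration):

	vnic_wq_post(...)	/* descriptors 10..12 filled, skb A, xmit_more = 1 */
	vnic_wq_post(...)	/* descriptors 13..14 filled, skb B, xmit_more = 0 */
	vnic_wq_doorbell(wq)	/* wmb(); iowrite32(15, &wq->ctrl->posted_index) */

posted_index = 15, the first unused slot, publishes both packets with one MMIO write.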