i40e/i40evf: Add txring_txq function to match fm10k and ixgbe
authorAlexander Duyck <alexander.h.duyck@intel.com>
Mon, 12 Sep 2016 21:18:40 +0000 (14:18 -0700)
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>
Sun, 25 Sep 2016 05:22:17 +0000 (22:22 -0700)
This patch adds a txring_txq function which allows us to convert an
i40e_ring/i40evf_ring to a netdev_tx_queue structure.  This way we
can avoid having to make a multi-line function call for all the spots
that need access to this.

Change-ID: Ic063b71d8b92ea406d2c32e798c8e2b02809d65b
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h

index a2077bedac934aa08500608300e343cb0833f041..6e0a7acf17a286965c607055f2542a3e84c3ade9 100644 (file)
@@ -584,8 +584,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
                return;
 
        /* cleanup Tx queue statistics */
-       netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
-                                                 tx_ring->queue_index));
+       netdev_tx_reset_queue(txring_txq(tx_ring));
 }
 
 /**
@@ -754,8 +753,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
                        tx_ring->arm_wb = true;
        }
 
-       netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
-                                                     tx_ring->queue_index),
+       /* notify netdev of completed buffers */
+       netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -2784,9 +2783,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        tx_ring->next_to_use = i;
 
-       netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-                                                tx_ring->queue_index),
-                                                first->bytecount);
+       netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        /* Algorithm to optimize tail and RS bit setting:
@@ -2811,13 +2808,11 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
         * trigger a force WB.
         */
        if (skb->xmit_more  &&
-           !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-                                                   tx_ring->queue_index))) {
+           !netif_xmit_stopped(txring_txq(tx_ring))) {
                tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
                tail_bump = false;
        } else if (!skb->xmit_more &&
-                  !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-                                                      tx_ring->queue_index)) &&
+                  !netif_xmit_stopped(txring_txq(tx_ring)) &&
                   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
                   (tx_ring->packet_stride < WB_STRIDE) &&
                   (desc_count < WB_STRIDE)) {
index b78c810d18358b5dd9628ae089564e0c6a3ec4df..508840585645d368e737bcd72c987bb4977b50c6 100644 (file)
@@ -463,4 +463,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
        return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
               (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
 }
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+       return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
 #endif /* _I40E_TXRX_H_ */
index cb6b13098699df9b1cae54bb604f37b82a606602..d4c6a767be948e51a48f983d5f4b87e2f71ca71d 100644 (file)
@@ -103,8 +103,7 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
                return;
 
        /* cleanup Tx queue statistics */
-       netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
-                                                 tx_ring->queue_index));
+       netdev_tx_reset_queue(txring_txq(tx_ring));
 }
 
 /**
@@ -273,8 +272,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
                        tx_ring->arm_wb = true;
        }
 
-       netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
-                                                     tx_ring->queue_index),
+       /* notify netdev of completed buffers */
+       netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -2012,9 +2011,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        tx_ring->next_to_use = i;
 
-       netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-                                                tx_ring->queue_index),
-                                                first->bytecount);
+       netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        /* Algorithm to optimize tail and RS bit setting:
@@ -2039,13 +2036,11 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
         * trigger a force WB.
         */
        if (skb->xmit_more  &&
-           !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-                                                   tx_ring->queue_index))) {
+           !netif_xmit_stopped(txring_txq(tx_ring))) {
                tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
                tail_bump = false;
        } else if (!skb->xmit_more &&
-                  !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-                                                      tx_ring->queue_index)) &&
+                  !netif_xmit_stopped(txring_txq(tx_ring)) &&
                   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
                   (tx_ring->packet_stride < WB_STRIDE) &&
                   (desc_count < WB_STRIDE)) {
index 0112277e5882a995cc2d176a220a1391f74c77f8..84e561cc0166a7728c5eb73dd5cec620ceaacf7d 100644 (file)
@@ -445,4 +445,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
        return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
               (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
 }
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+       return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
 #endif /* _I40E_TXRX_H_ */