pch_gbe: replace private tx ring lock with common netif_tx_lock
author	Francois Romieu <romieu@fr.zoreil.com>
Wed, 27 Apr 2016 21:29:44 +0000 (23:29 +0200)
committer	David S. Miller <davem@davemloft.net>
Thu, 28 Apr 2016 21:19:58 +0000 (17:19 -0400)
pch_gbe_tx_ring.tx_lock is only used in the hard_xmit handler and
in the transmit completion reaper called from NAPI context. Both paths
can be serialized by the core's per-queue netif_tx_lock instead, so the
private spinlock can go.
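
A minimal sketch (not the driver's actual code) of the resulting locking
scheme, assuming a single-queue device whose driver does not set
NETIF_F_LLTX; the example_* helpers are placeholders for the driver's own
ring accounting:

    #include <linux/netdevice.h>

    /* Placeholders standing in for the driver's ring bookkeeping. */
    static bool example_ring_full(struct net_device *dev);
    static bool example_ring_has_room(struct net_device *dev);
    static void example_queue_to_hw(struct net_device *dev, struct sk_buff *skb);

    /* .ndo_start_xmit: the core already holds this queue's xmit lock here
     * (HARD_TX_LOCK), so no private spinlock is needed. */
    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (unlikely(example_ring_full(dev))) {
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;
            }
            example_queue_to_hw(dev, skb);
            return NETDEV_TX_OK;
    }

    /* NAPI completion reaper: take the same lock explicitly so the
     * stop/wake decision cannot race with the xmit path above. */
    static void example_clean_tx(struct net_device *dev)
    {
            netif_tx_lock(dev);
            if (netif_queue_stopped(dev) && example_ring_has_room(dev))
                    netif_wake_queue(dev);
            netif_tx_unlock(dev);
    }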

Compile-tested only. Potential victims Cced.

Someone more knowledgeable may check if pch_gbe_tx_queue could
have some use for a mmiowb.
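
Should one turn out to be needed, the usual spot is right after the MMIO
write that hands new descriptors to the hardware. A sketch only, with a
placeholder register offset and helper name rather than pch_gbe definitions:

    #include <linux/io.h>

    /* Hypothetical doorbell kick: mmiowb() orders the posted MMIO write
     * before a following unlock, so another CPU cannot kick the device
     * out of order on architectures that need it. */
    static void example_kick_tx(void __iomem *ioaddr, u32 next_desc_dma)
    {
            iowrite32(next_desc_dma, ioaddr + 0x40);   /* placeholder offset */
            mmiowb();
    }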

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Andy Cress <andy.cress@us.kontron.com>
Cc: bryan@fossetcon.org
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c

index 2a55d6d53ee61635806725d5167638b926f63363..8d710a3b4db0626e44ce019d115c31a061bcb673 100644 (file)
@@ -481,7 +481,6 @@ struct pch_gbe_buffer {
 
 /**
  * struct pch_gbe_tx_ring - tx ring information
- * @tx_lock:   spinlock structs
  * @desc:      pointer to the descriptor ring memory
  * @dma:       physical address of the descriptor ring
  * @size:      length of descriptor ring in bytes
@@ -491,7 +490,6 @@ struct pch_gbe_buffer {
  * @buffer_info:       array of buffer information structs
  */
 struct pch_gbe_tx_ring {
-       spinlock_t tx_lock;
        struct pch_gbe_tx_desc *desc;
        dma_addr_t dma;
        unsigned int size;
index ca4add7494106256aa171951c4bbe13ac96b50fa..3cd87a41ac92c42f4f75716753dccd57eaba9c1a 100644 (file)
@@ -1640,7 +1640,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
                   cleaned_count);
        if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
                /* Recover from running out of Tx resources in xmit_frame */
-               spin_lock(&tx_ring->tx_lock);
+               netif_tx_lock(adapter->netdev);
                if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
                {
                        netif_wake_queue(adapter->netdev);
@@ -1652,7 +1652,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 
                netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
                           tx_ring->next_to_clean);
-               spin_unlock(&tx_ring->tx_lock);
+               netif_tx_unlock(adapter->netdev);
        }
        return cleaned;
 }
@@ -1805,7 +1805,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-       spin_lock_init(&tx_ring->tx_lock);
 
        for (desNo = 0; desNo < tx_ring->count; desNo++) {
                tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
@@ -2135,13 +2134,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
-       unsigned long flags;
-
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
 
        if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
                netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
                netdev_dbg(netdev,
                           "Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
                           tx_ring->next_to_use, tx_ring->next_to_clean);
@@ -2150,7 +2145,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        /* CRC,ITAG no support */
        pch_gbe_tx_queue(adapter, tx_ring, skb);
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        return NETDEV_TX_OK;
 }