ethernet/intel: Use napi_alloc_skb
authorAlexander Duyck <alexander.h.duyck@redhat.com>
Wed, 10 Dec 2014 03:40:56 +0000 (19:40 -0800)
committerDavid S. Miller <davem@davemloft.net>
Wed, 10 Dec 2014 18:31:57 +0000 (13:31 -0500)
This change replaces calls to netdev_alloc_skb_ip_align with
napi_alloc_skb.  The advantage of napi_alloc_skb is currently the fact that
the page allocation doesn't make use of any irq disable calls.

There are a few spots where I couldn't replace the calls as the buffer
allocation routine is called as a part of init which is outside of the
softirq context.

Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

index 862d1989ae1c4adc9ca61d2dfa4ae7a1d213f707..83140cbb5f0119d6ab478e3491a44c4813f152f7 100644 (file)
@@ -4100,7 +4100,7 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
                                          unsigned int bufsz)
 {
-       struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+       struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
 
        if (unlikely(!skb))
                adapter->alloc_rx_buff_failed++;
index 88936aa0029d72c2a7af75ccd13f86faa545eece..5c82c80655017bcb40fe10f35c5c2d7341ef495d 100644 (file)
@@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                 */
                if (length < copybreak) {
                        struct sk_buff *new_skb =
-                           netdev_alloc_skb_ip_align(netdev, length);
+                               napi_alloc_skb(&adapter->napi, length);
                        if (new_skb) {
                                skb_copy_to_linear_data_offset(new_skb,
                                                               -NET_IP_ALIGN,
index 91516aed373e1db04085a85fff02b6e41d604703..ee1ecb146df7de40e126cbffca7c80528254ef8e 100644 (file)
@@ -308,8 +308,8 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
 #endif
 
                /* allocate a skb to store the frags */
-               skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                               FM10K_RX_HDR_LEN);
+               skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+                                    FM10K_RX_HDR_LEN);
                if (unlikely(!skb)) {
                        rx_ring->rx_stats.alloc_failed++;
                        return NULL;
index f04ad13f715980d6677820679d60670bbd122f10..485d2c609d5de96e9c5ec96abed7274d38d96403 100644 (file)
@@ -6644,8 +6644,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 #endif
 
                /* allocate a skb to store the frags */
-               skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                               IGB_RX_HDR_LEN);
+               skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
                if (unlikely(!skb)) {
                        rx_ring->rx_stats.alloc_failed++;
                        return NULL;
index 055961b0f24b05abdc66f2bdab71e83093e9cdad..aa87605b144a23a98d3ed7121c035c8703ad4627 100644 (file)
@@ -1963,7 +1963,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
  * this should improve performance for small packets with large amounts
  * of reassembly being done in the stack
  */
-static void ixgb_check_copybreak(struct net_device *netdev,
+static void ixgb_check_copybreak(struct napi_struct *napi,
                                 struct ixgb_buffer *buffer_info,
                                 u32 length, struct sk_buff **skb)
 {
@@ -1972,7 +1972,7 @@ static void ixgb_check_copybreak(struct net_device *netdev,
        if (length > copybreak)
                return;
 
-       new_skb = netdev_alloc_skb_ip_align(netdev, length);
+       new_skb = napi_alloc_skb(napi, length);
        if (!new_skb)
                return;
 
@@ -2064,7 +2064,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
                        goto rxdesc_done;
                }
 
-               ixgb_check_copybreak(netdev, buffer_info, length, &skb);
+               ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
 
                /* Good Receive */
                skb_put(skb, length);
index fbd52924ee34a2b8395fa5d8581129cf5c94ac2b..798b05556e1bcced693bc680ac076992e1a59e85 100644 (file)
@@ -1913,8 +1913,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
 #endif
 
                /* allocate a skb to store the frags */
-               skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                               IXGBE_RX_HDR_SIZE);
+               skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+                                    IXGBE_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        return NULL;