mlx4: convert to SKB paged frag API.
author     Ian Campbell <Ian.Campbell@citrix.com>
           Wed, 19 Oct 2011 23:01:45 +0000 (23:01 +0000)
committer  David S. Miller <davem@davemloft.net>
           Fri, 21 Oct 2011 06:52:52 +0000 (02:52 -0400)
Convert the mlx4 Ethernet driver to the SKB paged frag API: the RX ring's
per-fragment bookkeeping moves from struct skb_frag_struct to struct page_frag,
and the open-coded accesses to skb fragment internals in the RX completion and
TX mapping paths are replaced with the generic helpers (__skb_frag_set_page,
skb_frag_size_set, __skb_frag_unref, skb_frag_address_safe and
skb_frag_dma_map).

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c

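The RX side below stops tracking its receive buffers as struct skb_frag_struct
and keeps them in struct page_frag instead; skb_frag_struct is then only
touched through the skb_frag_* helpers once a fragment is attached to an skb.
As a rough sketch (not the exact kernel layout; the field widths in
include/linux/skbuff.h depend on the configuration), the page_frag fields this
patch relies on are:

struct page_frag {
	struct page	*page;		/* backing page for the driver-owned buffer */
	__u32		offset;		/* offset of the buffer within that page */
	__u32		size;		/* length of the buffer in bytes */
};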
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9aec8b836fe3287769c70cd479937fbe6114fab3..b89c36dbf5b3ea7927fb46252819210bdd55ec26 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -44,7 +44,7 @@
 
 static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
                              struct mlx4_en_rx_desc *rx_desc,
-                             struct skb_frag_struct *skb_frags,
+                             struct page_frag *skb_frags,
                              struct mlx4_en_rx_alloc *ring_alloc,
                              int i)
 {
@@ -61,7 +61,7 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
                        return -ENOMEM;
 
                skb_frags[i].page = page_alloc->page;
-               skb_frags[i].page_offset = page_alloc->offset;
+               skb_frags[i].offset = page_alloc->offset;
                page_alloc->page = page;
                page_alloc->offset = frag_info->frag_align;
        } else {
@@ -69,11 +69,11 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
                get_page(page);
 
                skb_frags[i].page = page;
-               skb_frags[i].page_offset = page_alloc->offset;
+               skb_frags[i].offset = page_alloc->offset;
                page_alloc->offset += frag_info->frag_stride;
        }
        dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
-                            skb_frags[i].page_offset, frag_info->frag_size,
+                            skb_frags[i].offset, frag_info->frag_size,
                             PCI_DMA_FROMDEVICE);
        rx_desc->data[i].addr = cpu_to_be64(dma);
        return 0;
@@ -157,8 +157,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
                                   struct mlx4_en_rx_ring *ring, int index)
 {
        struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
-       struct skb_frag_struct *skb_frags = ring->rx_info +
-                                           (index << priv->log_rx_info);
+       struct page_frag *skb_frags = ring->rx_info +
+                                     (index << priv->log_rx_info);
        int i;
 
        for (i = 0; i < priv->num_frags; i++)
@@ -183,7 +183,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
                                 int index)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
-       struct skb_frag_struct *skb_frags;
+       struct page_frag *skb_frags;
        struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
        dma_addr_t dma;
        int nr;
@@ -194,7 +194,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
                dma = be64_to_cpu(rx_desc->data[nr].addr);
 
                en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
-               pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags[nr]),
+               pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
                                 PCI_DMA_FROMDEVICE);
                put_page(skb_frags[nr].page);
        }
@@ -403,7 +403,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 /* Unmap a completed descriptor and free unused pages */
 static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                                    struct mlx4_en_rx_desc *rx_desc,
-                                   struct skb_frag_struct *skb_frags,
+                                   struct page_frag *skb_frags,
                                    struct sk_buff *skb,
                                    struct mlx4_en_rx_alloc *page_alloc,
                                    int length)
@@ -421,9 +421,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                        break;
 
                /* Save page reference in skb */
-               skb_frags_rx[nr].page = skb_frags[nr].page;
-               skb_frag_size_set(&skb_frags_rx[nr], skb_frag_size(&skb_frags[nr]));
-               skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+               __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
+               skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
+               skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
                skb->truesize += frag_info->frag_stride;
                dma = be64_to_cpu(rx_desc->data[nr].addr);
 
@@ -446,7 +446,7 @@ fail:
         * the descriptor) of this packet; remaining fragments are reused... */
        while (nr > 0) {
                nr--;
-               put_page(skb_frags_rx[nr].page);
+               __skb_frag_unref(&skb_frags_rx[nr]);
        }
        return 0;
 }
@@ -454,7 +454,7 @@ fail:
 
 static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
                                      struct mlx4_en_rx_desc *rx_desc,
-                                     struct skb_frag_struct *skb_frags,
+                                     struct page_frag *skb_frags,
                                      struct mlx4_en_rx_alloc *page_alloc,
                                      unsigned int length)
 {
@@ -475,7 +475,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
        /* Get pointer to first fragment so we could copy the headers into the
         * (linear part of the) skb */
-       va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+       va = page_address(skb_frags[0].page) + skb_frags[0].offset;
 
        if (length <= SMALL_PACKET_SIZE) {
                /* We are copying all relevant data to the skb - temporarily
@@ -533,7 +533,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cqe *cqe;
        struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
-       struct skb_frag_struct *skb_frags;
+       struct page_frag *skb_frags;
        struct mlx4_en_rx_desc *rx_desc;
        struct sk_buff *skb;
        int index;
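In mlx4_en_complete_rx_desc() above, pages are now handed to the skb through
the paged frag helpers rather than by writing skb_frag_struct fields directly.
A hypothetical helper (illustration only, not part of this patch) doing the
same attachment through that API might look like:

static void rx_attach_frag(struct sk_buff *skb, int nr,
			   const struct page_frag *pfrag)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[nr];

	/* The caller is assumed to already hold the page reference, as
	 * mlx4_en_alloc_frag() does via get_page(). */
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, pfrag->size);

	skb_shinfo(skb)->nr_frags = nr + 1;
	skb->len += pfrag->size;
	skb->data_len += pfrag->size;
	skb->truesize += pfrag->size;
}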
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 75dda26189fd4ffbcfa78dff2f25af2f5b893df5..75338eb88e88c231903d47d7246af0bc67dac14e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -460,26 +460,13 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
                }
 }
 
-static void *get_frag_ptr(struct sk_buff *skb)
-{
-       struct skb_frag_struct *frag =  &skb_shinfo(skb)->frags[0];
-       struct page *page = frag->page;
-       void *ptr;
-
-       ptr = page_address(page);
-       if (unlikely(!ptr))
-               return NULL;
-
-       return ptr + frag->page_offset;
-}
-
 static int is_inline(struct sk_buff *skb, void **pfrag)
 {
        void *ptr;
 
        if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
                if (skb_shinfo(skb)->nr_frags == 1) {
-                       ptr = get_frag_ptr(skb);
+                       ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
                        if (unlikely(!ptr))
                                return 0;
 
@@ -756,8 +743,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                /* Map fragments */
                for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
                        frag = &skb_shinfo(skb)->frags[i];
-                       dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
-                                          skb_frag_size(frag), PCI_DMA_TODEVICE);
+                       dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag,
+                                              0, skb_frag_size(frag),
+                                              DMA_TO_DEVICE);
                        data->addr = cpu_to_be64(dma);
                        data->lkey = cpu_to_be32(mdev->mr.key);
                        wmb();
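On the TX path, the open-coded page_address()/pci_map_page() handling is
replaced with skb_frag_address_safe() and skb_frag_dma_map(), which also moves
the fragment mapping onto the generic DMA API (hence the &mdev->dev->pdev->dev
device pointer and DMA_TO_DEVICE). As illustrative stand-ins (hypothetical
names; simplified from what these helpers do in include/linux/skbuff.h):

static inline void *frag_address_or_null(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));

	if (unlikely(!ptr))	/* e.g. an unmapped highmem page */
		return NULL;
	return ptr + frag->page_offset;
}

static inline dma_addr_t frag_dma_map(struct device *dev, const skb_frag_t *frag,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	/* Map through the frag's own page and offset so the driver never
	 * touches those fields directly. */
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}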