net/mlx5e: Use the right DMA free function on TX path
author		Achiad Shochat <achiad@mellanox.com>	Thu, 12 Nov 2015 17:35:28 +0000 (19:35 +0200)
committer	David S. Miller <davem@davemloft.net>	Sun, 15 Nov 2015 23:43:40 +0000 (18:43 -0500)
On the xmit path we map the skb fragments with skb_frag_dma_map(),
which uses dma_map_page(), yet upon completion we dma-unmap them
using dma_unmap_single() rather than dma_unmap_page().
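[ For context: skb_frag_dma_map() is a thin wrapper around
  dma_map_page(), so every fragment mapping pushed on xmit is a page
  mapping. A sketch of the helper as defined in include/linux/skbuff.h
  around this time (the exact body may vary between kernel versions):

        static inline dma_addr_t skb_frag_dma_map(struct device *dev,
                                                  const skb_frag_t *frag,
                                                  size_t offset, size_t size,
                                                  enum dma_data_direction dir)
        {
                /* fragment data lives in a page, hence dma_map_page() */
                return dma_map_page(dev, skb_frag_page(frag),
                                    frag->page_offset + offset, size, dir);
        }
]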

To fix this, we now save the DMA map type on the xmit path and use
that info to call the matching DMA unmap method upon TX completion.
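[ A minimal sketch of the pairing rule this fix restores; the function
  and variable names here are hypothetical and error handling via
  dma_mapping_error() is omitted. On a direct-mapped system both unmap
  variants may happen to work, but with an IOMMU or CONFIG_DMA_API_DEBUG
  enabled the type mismatch is a real, reported bug:

        static void tx_map_unmap_sketch(struct device *dev,
                                        struct sk_buff *skb,
                                        skb_frag_t *frag,
                                        u32 headlen, u32 fsz)
        {
                dma_addr_t hdr, frg;

                /* linear part: map_single; paged frag: map_page */
                hdr = dma_map_single(dev, skb->data, headlen, DMA_TO_DEVICE);
                frg = skb_frag_dma_map(dev, frag, 0, fsz, DMA_TO_DEVICE);

                /* ... post descriptors, wait for TX completion ... */

                /* unmap with the variant matching the original map */
                dma_unmap_single(dev, hdr, headlen, DMA_TO_DEVICE);
                dma_unmap_page(dev, frg, fsz, DMA_TO_DEVICE);
        }
]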

Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f2ae62dd8c094ac4c11917f29d65fbb63c2194a2..22e72bf1ae4894a846d790d8973b87aee3ac6431 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
 
 #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
 
+enum mlx5e_dma_map_type {
+       MLX5E_DMA_MAP_SINGLE,
+       MLX5E_DMA_MAP_PAGE
+};
+
 struct mlx5e_sq_dma {
-       dma_addr_t addr;
-       u32        size;
+       dma_addr_t              addr;
+       u32                     size;
+       enum mlx5e_dma_map_type type;
 };
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f687ebf20d9ca0b486cc2328a2713382cebdedea..1341b1d3c421d2a0f2050b7c0103853cbecf1cd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
        }
 }
 
-static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
-                                     u32 *size)
+static inline void mlx5e_tx_dma_unmap(struct device *pdev,
+                                     struct mlx5e_sq_dma *dma)
 {
-       sq->dma_fifo_pc--;
-       *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
-       *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
-}
-
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
-{
-       dma_addr_t addr;
-       u32 size;
-       int i;
-
-       for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
-               mlx5e_dma_pop_last_pushed(sq, &addr, &size);
-               dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+       switch (dma->type) {
+       case MLX5E_DMA_MAP_SINGLE:
+               dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+               break;
+       case MLX5E_DMA_MAP_PAGE:
+               dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+               break;
+       default:
+               WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
 }
 
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
-                                 u32 size)
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+                                 dma_addr_t addr,
+                                 u32 size,
+                                 enum mlx5e_dma_map_type map_type)
 {
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+       sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
        sq->dma_fifo_pc++;
 }
 
-static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
-                                u32 *size)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
+{
+       return &sq->dma_fifo[i & sq->dma_fifo_mask];
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
 {
-       *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
-       *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+       int i;
+
+       for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+               struct mlx5e_sq_dma *last_pushed_dma =
+                       mlx5e_dma_get(sq, --sq->dma_fifo_pc);
+
+               mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
+       }
 }
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -225,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);
 
-               mlx5e_dma_push(sq, dma_addr, headlen);
+               mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                MLX5E_TX_SKB_CB(skb)->num_dma++;
 
                dseg++;
@@ -244,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);
 
-               mlx5e_dma_push(sq, dma_addr, fsz);
+               mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                MLX5E_TX_SKB_CB(skb)->num_dma++;
 
                dseg++;
@@ -360,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
                        }
 
                        for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
-                               dma_addr_t addr;
-                               u32 size;
+                               struct mlx5e_sq_dma *dma =
+                                       mlx5e_dma_get(sq, dma_fifo_cc++);
 
-                               mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
-                               dma_fifo_cc++;
-                               dma_unmap_single(sq->pdev, addr, size,
-                                                DMA_TO_DEVICE);
+                               mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }
 
                        npkts++;
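[ Note on the dma_fifo bookkeeping above: the ring uses free-running
  producer/consumer counters masked into a power-of-two array, which is
  why mlx5e_dma_get() can be fed either --sq->dma_fifo_pc (unwinding the
  newest entries on a WQE error) or dma_fifo_cc++ (draining the oldest
  entries on completion). A standalone sketch of that indexing scheme,
  with hypothetical names:

        struct fifo_entry { int payload; };

        struct fifo {
                struct fifo_entry *slots;
                unsigned int mask;      /* size - 1; size is a power of two */
                unsigned int pc;        /* producer counter, free-running */
                unsigned int cc;        /* consumer counter, free-running */
        };

        static void fifo_push(struct fifo *f, int payload)
        {
                /* counters wrap naturally; the mask confines the index */
                f->slots[f->pc++ & f->mask].payload = payload;
        }

        static struct fifo_entry *fifo_pop_newest(struct fifo *f)
        {
                return &f->slots[--f->pc & f->mask];    /* error unwind */
        }

        static struct fifo_entry *fifo_pop_oldest(struct fifo *f)
        {
                return &f->slots[f->cc++ & f->mask];    /* completion drain */
        }

  This stays correct as long as the queue is flow-controlled so that
  the producer can never lap unconsumed entries. ]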