net: ena: fix driver when PAGE_SIZE == 64kB
author Netanel Belgazal <netanel@amazon.com>
Sun, 9 Sep 2018 08:15:21 +0000 (08:15 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 10 Oct 2018 06:53:21 +0000 (08:53 +0200)
[ Upstream commit ef5b0771d247379c90c8bf1332ff32f7f74bff7f ]

The buffer length field in the ena Rx descriptor is 16 bits wide, and
the current driver passes a full page in each Rx descriptor.
When PAGE_SIZE is 64kB or larger, the length no longer fits in 16 bits
and the field wraps to zero.
To solve this issue, cap the buffer passed in each ena Rx descriptor at
16kB, even when allocating 64kB kernel pages. This change does not
impact ena device functionality, as 16kB is still larger than the
maximum MTU the device supports (~9kB).

Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h

index 0d9ce08ee3a900dbff4e5dda61bf9232a04dc6d2..1d92e034febcf5d79fe6a03504c7583232ba1719 100644 (file)
@@ -422,7 +422,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
                return -ENOMEM;
        }
 
-       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+       dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
                           DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
                u64_stats_update_begin(&rx_ring->syncp);
@@ -439,7 +439,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
        rx_info->page_offset = 0;
        ena_buf = &rx_info->ena_buf;
        ena_buf->paddr = dma;
-       ena_buf->len = PAGE_SIZE;
+       ena_buf->len = ENA_PAGE_SIZE;
 
        return 0;
 }
@@ -456,7 +456,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
                return;
        }
 
-       dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+       dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
                       DMA_FROM_DEVICE);
 
        __free_page(page);
@@ -849,10 +849,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        do {
                dma_unmap_page(rx_ring->dev,
                               dma_unmap_addr(&rx_info->ena_buf, paddr),
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+                              ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-                               rx_info->page_offset, len, PAGE_SIZE);
+                               rx_info->page_offset, len, ENA_PAGE_SIZE);
 
                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx skb updated. len %d. data_len %d\n",
index c5eaf7616939bc8ce7fea4996ae60ad4d90fd55c..008f2d594d402c6d6519a44c54a6e03bd52daeff 100644 (file)
@@ -321,4 +321,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length field is 16 bits long, so when PAGE_SIZE == 64kB the
+ * driver would pass a length of 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */