#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
+#include <linux/inet_lro.h>
#include <asm/system.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
#define RX_ENABLE_INTERRUPT 0x20000000
#define RX_FIRST_DESC 0x08000000
#define RX_LAST_DESC 0x04000000
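+/* RX descriptor cmd_sts bits checked by the LRO receive path (mv643xx_get_skb_header()). */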
+#define RX_IP_HDR_OK 0x02000000
+#define RX_PKT_IS_IPV4 0x01000000
+#define RX_PKT_IS_ETHERNETV2 0x00800000
+#define RX_PKT_LAYER4_TYPE_MASK 0x00600000
+#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000
+#define RX_PKT_IS_VLAN_TAGGED 0x00080000
/* TX descriptor command */
#define TX_ENABLE_INTERRUPT 0x00800000
u32 late_collision;
};
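+/*
+ * Software LRO statistics, summed over all RX queues by
+ * mv643xx_eth_grab_lro_stats() and reported through ethtool.
+ */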
+struct lro_counters {
+ u32 lro_aggregated;
+ u32 lro_flushed;
+ u32 lro_no_desc;
+};
+
struct rx_queue {
int index;
dma_addr_t rx_desc_dma;
int rx_desc_area_size;
struct sk_buff **rx_skb;
+
+#ifdef CONFIG_MV643XX_ETH_LRO
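+ /* Per-queue software LRO state: lro_arr provides up to 8 concurrent aggregation sessions. */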
+ struct net_lro_mgr lro_mgr;
+ struct net_lro_desc lro_arr[8];
+#endif
};
struct tx_queue {
spinlock_t mib_counters_lock;
struct mib_counters mib_counters;
+ struct lro_counters lro_counters;
+
struct work_struct tx_timeout_task;
struct napi_struct napi;
/* rx napi ******************************************************************/
+#ifdef CONFIG_MV643XX_ETH_LRO
+static int
+mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
+ u64 *hdr_flags, void *priv)
+{
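+ /* rxq_process() passes the RX descriptor's cmd_sts word as the opaque 'priv' argument. */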
+ unsigned long cmd_sts = (unsigned long)priv;
+
+ /*
+ * Make sure that this packet is Ethernet II, is not VLAN
+ * tagged, is IPv4, has a valid IP header, and is TCP.
+ */
+ if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
+ RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
+ RX_PKT_IS_VLAN_TAGGED)) !=
+ (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
+ RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
+ return -1;
+
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, ip_hdrlen(skb));
+ *iphdr = ip_hdr(skb);
+ *tcph = tcp_hdr(skb);
+ *hdr_flags = LRO_IPV4 | LRO_TCP;
+
+ return 0;
+}
+#endif
+
static int rxq_process(struct rx_queue *rxq, int budget)
{
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
struct net_device_stats *stats = &mp->dev->stats;
+ int lro_flush_needed;
int rx;
+ lro_flush_needed = 0;
rx = 0;
while (rx < budget && rxq->rx_desc_count) {
struct rx_desc *rx_desc;
if (cmd_sts & LAYER_4_CHECKSUM_OK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, mp->dev);
- netif_receive_skb(skb);
+
+#ifdef CONFIG_MV643XX_ETH_LRO
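+ /* Aggregate only when the device has LRO enabled and the hardware verified the L4 checksum. */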
+ if (skb->dev->features & NETIF_F_LRO &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
+ lro_flush_needed = 1;
+ } else
+#endif
+ netif_receive_skb(skb);
continue;
dev_kfree_skb(skb);
}
+#ifdef CONFIG_MV643XX_ETH_LRO
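+ /* Flush all in-progress LRO sessions before returning from this receive pass. */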
+ if (lro_flush_needed)
+ lro_flush_all(&rxq->lro_mgr);
+#endif
+
if (rx < budget)
mp->work_rx &= ~(1 << rxq->index);
return stats;
}
+static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
+{
+ u32 lro_aggregated = 0;
+ u32 lro_flushed = 0;
+ u32 lro_no_desc = 0;
+#ifdef CONFIG_MV643XX_ETH_LRO
+ int i;
+
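+ /* Sum the per-RX-queue LRO statistics into the per-port ethtool counters. */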
+ for (i = 0; i < mp->rxq_count; i++) {
+ struct rx_queue *rxq = mp->rxq + i;
+
+ lro_aggregated += rxq->lro_mgr.stats.aggregated;
+ lro_flushed += rxq->lro_mgr.stats.flushed;
+ lro_no_desc += rxq->lro_mgr.stats.no_desc;
+ }
+#endif
+
+ mp->lro_counters.lro_aggregated = lro_aggregated;
+ mp->lro_counters.lro_flushed = lro_flushed;
+ mp->lro_counters.lro_no_desc = lro_no_desc;
+}
+
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
{ #m, FIELD_SIZEOF(struct mib_counters, m), \
-1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
+#define LROSTAT(m) \
+ { #m, FIELD_SIZEOF(struct lro_counters, m), \
+ -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
+
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
SSTAT(rx_packets),
SSTAT(tx_packets),
MIBSTAT(bad_crc_event),
MIBSTAT(collision),
MIBSTAT(late_collision),
+ LROSTAT(lro_aggregated),
+ LROSTAT(lro_flushed),
+ LROSTAT(lro_no_desc),
};
static int
mv643xx_eth_get_stats(dev);
mib_counters_update(mp);
+ mv643xx_eth_grab_lro_stats(mp);
for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
const struct mv643xx_eth_stats *stat;
.set_sg = ethtool_op_set_sg,
.get_strings = mv643xx_eth_get_strings,
.get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ethtool_op_set_flags,
.get_sset_count = mv643xx_eth_get_sset_count,
};
nexti * sizeof(struct rx_desc);
}
+#ifdef CONFIG_MV643XX_ETH_LRO
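+ /*
+ * Per-queue software LRO setup: mv643xx_get_skb_header() parses and
+ * validates headers, at most 8 sessions (lro_arr) are tracked at once,
+ * and up to 32 frames are aggregated per session before a flush.
+ */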
+ rxq->lro_mgr.dev = mp->dev;
+ memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
+ rxq->lro_mgr.features = LRO_F_NAPI;
+ rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+ rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+ rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
+ rxq->lro_mgr.max_aggr = 32;
+ rxq->lro_mgr.frag_align_pad = 0;
+ rxq->lro_mgr.lro_arr = rxq->lro_arr;
+ rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;
+
+ memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));
+#endif
+
return 0;