static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
-static void igb_xmit_flush(struct net_device *netdev, u16 queue);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
.ndo_open = igb_open,
.ndo_stop = igb_close,
.ndo_start_xmit = igb_xmit_frame,
- .ndo_xmit_flush = igb_xmit_flush,
.ndo_get_stats64 = igb_get_stats64,
.ndo_set_rx_mode = igb_set_rx_mode,
.ndo_set_mac_address = igb_set_mac,
tx_ring->next_to_use = i;
+ if (!skb->xmit_more) {
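+ /* no more frames are pending for this ring, so publish the new tail (doorbell) to the hardware now */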
+ writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
+ }
return;
dma_error:
return NETDEV_TX_OK;
}
-static struct igb_ring *__igb_tx_queue_mapping(struct igb_adapter *adapter, unsigned int r_idx)
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+ struct sk_buff *skb)
{
+ unsigned int r_idx = skb->queue_mapping;
+
if (r_idx >= adapter->num_tx_queues)
r_idx = r_idx % adapter->num_tx_queues;
return adapter->tx_ring[r_idx];
}
-static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
- struct sk_buff *skb)
-{
- return __igb_tx_queue_mapping(adapter, skb->queue_mapping);
-}
-
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);

return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}
-static void igb_xmit_flush(struct net_device *netdev, u16 queue)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct igb_ring *tx_ring;
-
- tx_ring = __igb_tx_queue_mapping(adapter, queue);
-
- writel(tx_ring->next_to_use, tx_ring->tail);
-
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
-}
-
/**
* igb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
}
}
- return NETDEV_TX_OK;
-}
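+ /* Only notify (kick) the host when no further packets are pending; kicks are comparatively expensive. */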
+ if (!skb->xmit_more)
+ virtqueue_kick(sq->vq);
-static void xmit_flush(struct net_device *dev, u16 qnum)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- struct send_queue *sq = &vi->sq[qnum];
-
- virtqueue_kick(sq->vq);
+ return NETDEV_TX_OK;
}
/*
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
.ndo_start_xmit = start_xmit,
- .ndo_xmit_flush = xmit_flush,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = virtnet_set_mac_address,
.ndo_set_rx_mode = virtnet_set_rx_mode,
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required; cannot be NULL.
*
- * void (*ndo_xmit_flush)(struct net_device *dev, u16 queue);
- * A driver implements this function when it wishes to support
- * deferred TX queue flushing. The idea is that the expensive
- * operation to trigger TX queue processing can be done after
- * N calls to ndo_start_xmit rather than being done every single
- * time. In this regime ndo_start_xmit will be called one or more
- * times, and then a final ndo_xmit_flush call will be made to
- * have the driver tell the device about the new pending TX queue
- * entries. The kernel keeps track of which queues need flushing
- * by monitoring skb->queue_mapping of the packets it submits to
- * ndo_start_xmit. This is the queue value that will be passed
- * to ndo_xmit_flush.
- *
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
* Called to decide which queue to use when device supports multiple
int (*ndo_stop)(struct net_device *dev);
netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
- void (*ndo_xmit_flush)(struct net_device *dev, u16 queue);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv,
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev)
{
- netdev_tx_t ret;
- u16 q;
-
- q = skb->queue_mapping;
- ret = ops->ndo_start_xmit(skb, dev);
- if (dev_xmit_complete(ret) && ops->ndo_xmit_flush)
- ops->ndo_xmit_flush(dev, q);
-
- return ret;
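+ /* No batching on this single-skb path: clear xmit_more so the driver flushes to hardware immediately. */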
+ skb->xmit_more = 0;
+ return ops->ndo_start_xmit(skb, dev);
}
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev)
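Nothing in this patch actually sets skb->xmit_more; __netdev_start_xmit() clears it, so every driver keeps today's one-doorbell-per-packet behaviour until a batching caller appears. Purely as a hedged illustration of how such a caller could use the bit, the sketch below walks a linked list of skbs bound for the same queue and flags all but the last one; the helper name xmit_skb_list() and the skb->next list walk are assumptions for this example, not part of the patch.

/* Hypothetical sketch: transmit a burst of skbs that all map to the same
 * TX queue, leaving xmit_more set on every frame except the last so the
 * driver (e.g. the igb/virtio_net hunks above) defers its doorbell/kick.
 * Requeueing and queue-stop handling on NETDEV_TX_BUSY are omitted.
 */
static netdev_tx_t xmit_skb_list(struct sk_buff *head, struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        netdev_tx_t ret = NETDEV_TX_OK;
        struct sk_buff *skb = head;

        while (skb) {
                struct sk_buff *next = skb->next;

                skb->next = NULL;
                skb->xmit_more = next ? 1 : 0;  /* more frames follow? */

                ret = ops->ndo_start_xmit(skb, dev);
                if (!dev_xmit_complete(ret))
                        break;  /* driver rejected the frame; end the burst */

                skb = next;
        }
        return ret;
}

Whoever leaves xmit_more set takes on the obligation to submit another skb for the same queue immediately afterwards; otherwise the last descriptors stay in the ring with no tail write or kick to flush them.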
* @tc_verd: traffic control verdict
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
+ * @xmit_more: More SKBs are pending for this queue
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
__u16 queue_mapping;
kmemcheck_bitfield_begin(flags2);
+ __u8 xmit_more:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif