struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
struct i40e_tx_buffer *first;
u32 tx_flags = 0;
+ int fso, count;
u8 hdr_len = 0;
u8 sof = 0;
u8 eof = 0;
- int fso;
if (i40e_fcoe_set_skb_header(skb))
goto out_drop;
- if (!i40e_xmit_descriptor_count(skb, tx_ring))
+ count = i40e_xmit_descriptor_count(skb);
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
+ }
/* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
*
* Returns -EBUSY if a stop is needed, else 0
**/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */
return 0;
}
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-#ifdef I40E_FCOE
-inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#else
-static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#endif
-{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
/**
* i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer
tx_ring->next_to_use = i;
}
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
- * @skb: send buffer
- * @tx_ring: ring to send buffer on
- *
- * Returns number of data descriptors needed for this skb. Returns 0 to indicate
- * there is not enough descriptors available in this ring since we need at least
- * one descriptor.
- **/
-#ifdef I40E_FCOE
-inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-#else
-static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-#endif
-{
- unsigned int f;
- int count = 0;
-
- /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
- * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 4 desc gap to avoid the cache line where head is,
- * + 1 desc for context descriptor,
- * otherwise try next time
- */
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
- tx_ring->tx_stats.tx_busy++;
- return 0;
- }
- return count;
-}
-
/**
* i40e_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
__be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
+ int tso, count;
int tsyn;
- int tso;
/* prefetch the data, we'll need it later */
prefetch(skb->data);
- if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ count = i40e_xmit_descriptor_count(skb);
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
+ }
/* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset);
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
-int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, u32 *flags);
#endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
/**
* i40e_get_head - Retrieve head from head writeback
return le32_to_cpu(*(volatile __le32 *)head);
}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
+ * @skb: send buffer
+ *
+ * Returns the number of data descriptors needed for this skb.
+ **/
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+{
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ int count = 0, size = skb_headlen(skb);
+
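+ /* one data descriptor covers at most I40E_MAX_DATA_PER_TXD bytes, so
+ * count the head data first and then each fragment in turn
+ */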
+ for (;;) {
+ count += TXD_USE_COUNT(size);
+
+ if (!nr_frags--)
+ break;
+
+ size = skb_frag_size(frag++);
+ }
+
+ return count;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
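+ /* hot path: if enough descriptors are free, skip the queue-stop slow path */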
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
#endif /* _I40E_TXRX_H_ */
*
* Returns -EBUSY if a stop is needed, else 0
**/
-static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */
return 0;
}
-/**
- * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __i40evf_maybe_stop_tx(tx_ring, size);
-}
-
/**
* i40evf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
first->bytecount);
- i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
* if xmit_more is supported
tx_ring->next_to_use = i;
}
-/**
- * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
- * @skb: send buffer
- * @tx_ring: ring to send buffer on
- *
- * Returns number of data descriptors needed for this skb. Returns 0 to indicate
- * there is not enough descriptors available in this ring since we need at least
- * one descriptor.
- **/
-static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-{
- unsigned int f;
- int count = 0;
-
- /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
- * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 4 desc gap to avoid the cache line where head is,
- * + 1 desc for context descriptor,
- * otherwise try next time
- */
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
- tx_ring->tx_stats.tx_busy++;
- return 0;
- }
- return count;
-}
-
/**
* i40e_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
__be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
- int tso;
+ int tso, count;
/* prefetch the data, we'll need it later */
prefetch(skb->data);
- if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
+ count = i40e_xmit_descriptor_count(skb);
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
+ }
/* prepare the xmit flags */
if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
/**
* i40e_get_head - Retrieve head from head writeback
return le32_to_cpu(*(volatile __le32 *)head);
}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
+ * @skb: send buffer
+ *
+ * Returns the number of data descriptors needed for this skb.
+ **/
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+{
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ int count = 0, size = skb_headlen(skb);
+
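+ /* one data descriptor covers at most I40E_MAX_DATA_PER_TXD bytes, so
+ * count the head data first and then each fragment in turn
+ */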
+ for (;;) {
+ count += TXD_USE_COUNT(size);
+
+ if (!nr_frags--)
+ break;
+
+ size = skb_frag_size(frag++);
+ }
+
+ return count;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
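+ /* hot path: if enough descriptors are free, skip the queue-stop slow path */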
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40evf_maybe_stop_tx(tx_ring, size);
+}
#endif /* _I40E_TXRX_H_ */