struct igb_ring *tx_ring;
union e1000_adv_tx_desc *tx_desc;
struct my_u0 { u64 a; u64 b; } *u0;
- struct igb_buffer *buffer_info;
struct igb_ring *rx_ring;
union e1000_adv_rx_desc *rx_desc;
u32 staterr;
printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
" leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
+ struct igb_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
- buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)buffer_info->dma,
"leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ struct igb_tx_buffer *buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
" %04X %3X %016llX %p", i,
"<-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
+ struct igb_rx_buffer *buffer_info;
+ buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
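The igb.h half of this rename is not shown in these hunks. As a rough sketch only, built from the fields the hunks in this patch actually dereference (and assuming the usual kernel types from <linux/types.h> and <linux/skbuff.h>), the split structures could look roughly like this; the authoritative definitions live in igb.h and may differ in field order, widths, and extra members:

struct igb_tx_buffer {
        u16 next_to_watch;              /* descriptor index polled for DD */
        unsigned long time_stamp;       /* jiffies when the buffer was mapped */
        dma_addr_t dma;                 /* DMA handle of the mapped data */
        struct sk_buff *skb;            /* skb owned by this slot */
        unsigned int bytecount;         /* bytes accounted to stats */
        u32 length;                     /* length of the mapped region */
        u32 tx_flags;                   /* copy of skb_shinfo(skb)->tx_flags */
        u16 gso_segs;                   /* segment count for TSO accounting */
        u16 mapped_as_page;             /* dma_unmap_page() vs dma_unmap_single() */
};

struct igb_rx_buffer {
        struct sk_buff *skb;            /* receive skb, if already allocated */
        dma_addr_t dma;                 /* DMA handle of skb->data */
        struct page *page;              /* page used by the packet-split path */
        dma_addr_t page_dma;            /* DMA handle of that page */
};

struct igb_ring would then carry one typed pointer per direction, for example (an assumption, not shown in this excerpt) as an anonymous union, since any given ring is only ever Tx or Rx:

        union {                         /* array of buffer info structs */
                struct igb_tx_buffer *tx_buffer_info;
                struct igb_rx_buffer *rx_buffer_info;
        };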
struct device *dev = tx_ring->dev;
int size;
- size = sizeof(struct igb_buffer) * tx_ring->count;
- tx_ring->buffer_info = vzalloc(size);
- if (!tx_ring->buffer_info)
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
goto err;
/* round up to nearest 4K */
return 0;
err:
- vfree(tx_ring->buffer_info);
+ vfree(tx_ring->tx_buffer_info);
dev_err(dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
struct device *dev = rx_ring->dev;
int size, desc_len;
- size = sizeof(struct igb_buffer) * rx_ring->count;
- rx_ring->buffer_info = vzalloc(size);
- if (!rx_ring->buffer_info)
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
goto err;
desc_len = sizeof(union e1000_adv_rx_desc);
return 0;
err:
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the receive descriptor"
" ring\n");
return -ENOMEM;
{
igb_clean_tx_ring(tx_ring);
- vfree(tx_ring->buffer_info);
- tx_ring->buffer_info = NULL;
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
/* if not set, then don't free */
if (!tx_ring->desc)
}
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
- struct igb_buffer *buffer_info)
+ struct igb_tx_buffer *buffer_info)
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
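The helper's body is cut off above. As a sketch of the usual DMA-API pattern such a routine follows (an illustration, not the patch's verbatim body), mapped_as_page simply selects the unmap primitive that matches how the buffer was mapped:

static void igb_unmap_and_free_tx_resource_sketch(struct igb_ring *tx_ring,
                                                  struct igb_tx_buffer *buffer_info)
{
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        /* skb fragments are mapped with dma_map_page() */
                        dma_unmap_page(tx_ring->dev, buffer_info->dma,
                                       buffer_info->length, DMA_TO_DEVICE);
                else
                        /* the linear skb data is mapped with dma_map_single() */
                        dma_unmap_single(tx_ring->dev, buffer_info->dma,
                                         buffer_info->length, DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }
        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
}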
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
- struct igb_buffer *buffer_info;
+ struct igb_tx_buffer *buffer_info;
unsigned long size;
unsigned int i;
- if (!tx_ring->buffer_info)
+ if (!tx_ring->tx_buffer_info)
return;
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
- size = sizeof(struct igb_buffer) * tx_ring->count;
- memset(tx_ring->buffer_info, 0, size);
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
/* Zero out the descriptor ring */
memset(tx_ring->desc, 0, tx_ring->size);
{
igb_clean_rx_ring(rx_ring);
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
/* if not set, then don't free */
if (!rx_ring->desc)
unsigned long size;
u16 i;
- if (!rx_ring->buffer_info)
+ if (!rx_ring->rx_buffer_info)
return;
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct igb_buffer *buffer_info = &rx_ring->buffer_info[i];
+ struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
if (buffer_info->dma) {
dma_unmap_single(rx_ring->dev,
buffer_info->dma,
}
}
- size = sizeof(struct igb_buffer) * rx_ring->count;
- memset(rx_ring->buffer_info, 0, size);
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
struct e1000_adv_tx_context_desc *context_desc;
unsigned int i;
int err;
- struct igb_buffer *buffer_info;
+ struct igb_tx_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
u32 mss_l4len_idx;
u8 l4len;
i = tx_ring->next_to_use;
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
context_desc = IGB_TX_CTXTDESC(tx_ring, i);
/* VLAN MACLEN IPLEN */
if (tx_flags & IGB_TX_FLAGS_VLAN)
{
struct e1000_adv_tx_context_desc *context_desc;
struct device *dev = tx_ring->dev;
- struct igb_buffer *buffer_info;
+ struct igb_tx_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
unsigned int i;
if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
(tx_flags & IGB_TX_FLAGS_VLAN)) {
i = tx_ring->next_to_use;
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
context_desc = IGB_TX_CTXTDESC(tx_ring, i);
if (tx_flags & IGB_TX_FLAGS_VLAN)
static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
unsigned int first)
{
- struct igb_buffer *buffer_info;
+ struct igb_tx_buffer *buffer_info;
struct device *dev = tx_ring->dev;
unsigned int hlen = skb_headlen(skb);
unsigned int count = 0, i;
i = tx_ring->next_to_use;
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
buffer_info->length = hlen;
/* set time_stamp *before* dma to help avoid a possible race */
if (i == tx_ring->count)
i = 0;
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
}
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
+ buffer_info->skb = skb;
+ buffer_info->tx_flags = skb_shinfo(skb)->tx_flags;
/* multiply data chunks by size of headers */
- tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
- tx_ring->buffer_info[i].gso_segs = gso_segs;
- tx_ring->buffer_info[first].next_to_watch = i;
+ buffer_info->bytecount = ((gso_segs - 1) * hlen) + skb->len;
+ buffer_info->gso_segs = gso_segs;
+ tx_ring->tx_buffer_info[first].next_to_watch = i;
return ++count;
if (i == 0)
i = tx_ring->count;
i--;
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc;
- struct igb_buffer *buffer_info;
+ struct igb_tx_buffer *buffer_info;
u32 olinfo_status = 0, cmd_type_len;
unsigned int i = tx_ring->next_to_use;
olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
do {
- buffer_info = &tx_ring->buffer_info[i];
+ buffer_info = &tx_ring->tx_buffer_info[i];
tx_desc = IGB_TX_DESC(tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
tx_desc->read.cmd_type_len =
count = igb_tx_map(tx_ring, skb, first);
if (!count) {
dev_kfree_skb_any(skb);
- tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->tx_buffer_info[first].time_stamp = 0;
tx_ring->next_to_use = first;
return NETDEV_TX_OK;
}
/**
* igb_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: pointer to q_vector containing needed info
- * @buffer: pointer to igb_buffer structure
+ * @buffer: pointer to igb_tx_buffer structure
*
* If we were asked to do hardware stamping and such a time stamp is
* available, then it must have been for this skb here because we
* allow only one such packet into the queue.
*/
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
+ struct igb_tx_buffer *buffer_info)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
{
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx_ring;
- struct igb_buffer *tx_buffer;
+ struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = q_vector->tx_work_limit;
if (test_bit(__IGB_DOWN, &adapter->state))
return true;
- tx_buffer = &tx_ring->buffer_info[i];
+ tx_buffer = &tx_ring->tx_buffer_info[i];
tx_desc = IGB_TX_DESC(tx_ring, i);
for (; budget; budget--) {
i++;
if (unlikely(i == tx_ring->count)) {
i = 0;
- tx_buffer = tx_ring->buffer_info;
+ tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, 0);
}
} while (eop_desc);
if (tx_ring->detect_tx_hung) {
struct e1000_hw *hw = &adapter->hw;
- u16 eop = tx_ring->buffer_info[i].next_to_watch;
+ u16 eop = tx_ring->tx_buffer_info[i].next_to_watch;
union e1000_adv_tx_desc *eop_desc;
eop_desc = IGB_TX_DESC(tx_ring, eop);
/* Detect a transmit hang in hardware; this serializes the
* check with the clearing of time_stamp and movement of i */
tx_ring->detect_tx_hung = false;
- if (tx_ring->buffer_info[i].time_stamp &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
+ if (tx_ring->tx_buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->tx_buffer_info[i].time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
readl(tx_ring->tail),
tx_ring->next_to_use,
tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
+ tx_ring->tx_buffer_info[eop].time_stamp,
eop,
jiffies,
eop_desc->wb.status);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
while (staterr & E1000_RXD_STAT_DD) {
- struct igb_buffer *buffer_info = &rx_ring->buffer_info[i];
+ struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
struct sk_buff *skb = buffer_info->skb;
union e1000_adv_rx_desc *next_rxd;
}
if (!(staterr & E1000_RXD_STAT_EOP)) {
- struct igb_buffer *next_buffer;
- next_buffer = &rx_ring->buffer_info[i];
+ struct igb_rx_buffer *next_buffer;
+ next_buffer = &rx_ring->rx_buffer_info[i];
buffer_info->skb = next_buffer->skb;
buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
}
static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
- struct igb_buffer *bi)
+ struct igb_rx_buffer *bi)
{
struct sk_buff *skb = bi->skb;
dma_addr_t dma = bi->dma;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
- struct igb_buffer *bi)
+ struct igb_rx_buffer *bi)
{
struct page *page = bi->page;
dma_addr_t page_dma = bi->page_dma;
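For orientation only: the two allocation helpers whose signatures change above return false when a buffer cannot be allocated or DMA-mapped, which lets igb_alloc_rx_buffers() below stop refilling early. A minimal sketch of the skb variant, under assumptions, might look like the following; IGB_RX_HDR_LEN is assumed here as the receive buffer length and may not match the driver's actual sizing or error accounting:

static bool igb_alloc_mapped_skb_sketch(struct igb_ring *rx_ring,
                                        struct igb_rx_buffer *bi)
{
        struct sk_buff *skb = bi->skb;
        dma_addr_t dma = bi->dma;

        if (dma)
                return true;            /* slot is already populated */

        if (!skb) {
                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                IGB_RX_HDR_LEN);
                if (!skb)
                        return false;   /* out of memory, retry later */
                bi->skb = skb;
        }

        dma = dma_map_single(rx_ring->dev, skb->data,
                             IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
        if (dma_mapping_error(rx_ring->dev, dma))
                return false;

        bi->dma = dma;
        return true;
}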
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
union e1000_adv_rx_desc *rx_desc;
- struct igb_buffer *bi;
+ struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
rx_desc = IGB_RX_DESC(rx_ring, i);
- bi = &rx_ring->buffer_info[i];
+ bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
while (cleaned_count--) {
i++;
if (unlikely(!i)) {
rx_desc = IGB_RX_DESC(rx_ring, 0);
- bi = rx_ring->buffer_info;
+ bi = rx_ring->rx_buffer_info;
i -= rx_ring->count;
}
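Aside, not part of the patch: a worked example of the index bias used in the refill loop above.

/* With rx_ring->count = 8 and next_to_use = 6 (u16 arithmetic):
 *
 *      i = 6 - 8 = 0xfffe      -> bi/rx_desc point at ring slot 6
 *      i++       = 0xffff      -> slot 7
 *      i++       = 0x0000      -> "!i" is true: reset bi/rx_desc to
 *                                  slot 0 and rebias with i -= count
 *
 * i no longer indexes the ring directly; the bi and rx_desc pointers are
 * advanced in step, and i only measures the distance to the wrap point,
 * so the wrap check is a cheap test against zero instead of a compare
 * with rx_ring->count.  The real ring index can be recovered afterwards
 * with i += rx_ring->count.
 */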