goto err_nomem;
}
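+ /* advanced descriptors are 16 bytes each; round the ring up to a 4 KB boundary */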
- tx_ring->size = tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc);
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
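+ /* the descriptor ring must come from coherent DMA memory */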
if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
&tx_ring->dma))) {
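+ /* program the Tx queue registers: ring base, length in bytes, head/tail, enable */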
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
((u64) tx_ring->dma >> 32));
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
- tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc));
+ tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
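+ /* build one test frame per descriptor using the advanced read format */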
for (i = 0; i < tx_ring->count; i++) {
- struct ixgbe_legacy_tx_desc *desc = IXGBE_TX_DESC(*tx_ring, i);
+ union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
struct sk_buff *skb;
unsigned int size = 1024;
skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
goto err_nomem;
skb_put(skb, size);
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[i].length = skb->len;
tx_ring->tx_buffer_info[i].dma =
pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- desc->buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
- desc->lower.data = cpu_to_le32(skb->len);
- desc->lower.data |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
- IXGBE_TXD_CMD_IFCS |
- IXGBE_TXD_CMD_RS);
- desc->upper.data = 0;
+ PCI_DMA_TODEVICE);
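+ /* advanced "read" layout: buffer address plus a combined cmd/type/len word */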
+ desc->read.buffer_addr =
+ cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
+ desc->read.cmd_type_len = cpu_to_le32(skb->len);
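+ /* EOP = end of packet, IFCS = insert FCS, RS = report status (DD) on completion */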
+ desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
+ IXGBE_TXD_CMD_IFCS |
+ IXGBE_TXD_CMD_RS);
+ desc->read.olinfo_status = 0;
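+ /* 82599 requires the frame length in the PAYLEN field of olinfo_status */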
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ desc->read.olinfo_status |=
+ cpu_to_le32(skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
}
/* Setup Rx Descriptor ring and Rx buffers */
goto err_nomem;
}
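+ /* Rx ring is likewise sized for 16-byte advanced Rx descriptors */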
- rx_ring->size = rx_ring->count * sizeof(struct ixgbe_legacy_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
&rx_ring->dma))) {
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
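+ /* post a mapped 2 KB buffer behind each Rx descriptor */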
for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_legacy_rx_desc *rx_desc =
- IXGBE_RX_DESC(*rx_ring, i);
+ union ixgbe_adv_rx_desc *rx_desc =
+ IXGBE_RX_DESC_ADV(*rx_ring, i);
struct sk_buff *skb;
skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
if (!skb)
goto err_nomem;
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->rx_buffer_info[i].skb = skb;
rx_ring->rx_buffer_info[i].dma =
pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
PCI_DMA_FROMDEVICE);
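+ /* hardware reads the buffer's DMA address from pkt_addr in the read format */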
- rx_desc->buffer_addr =
+ rx_desc->read.pkt_addr =
cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
memset(skb->data, 0x00, IXGBE_RXBUFFER_2048);
}
#define IXGBE_FDIR_INIT_DONE_POLL 10
#define IXGBE_FDIRCMD_CMD_POLL 10
-/* Transmit Descriptor - Legacy */
-struct ixgbe_legacy_tx_desc {
- u64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- __le32 data;
- struct {
- __le16 length; /* Data buffer length */
- u8 cso; /* Checksum offset */
- u8 cmd; /* Descriptor control */
- } flags;
- } lower;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 css; /* Checksum start */
- __le16 vlan;
- } fields;
- } upper;
-};
-
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
__le64 buffer_addr; /* Address of descriptor's data buffer */
__le32 cmd_type_len;
__le32 olinfo_status;
} read;
struct {
__le64 rsvd; /* Reserved */
__le32 nxtseq_seed;
__le32 status;
} wb;
};
-/* Receive Descriptor - Legacy */
-struct ixgbe_legacy_rx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
- __le16 vlan;
-};
-
/* Receive Descriptor - Advanced */
union ixgbe_adv_rx_desc {
struct {