static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
- PMP_TCB pMpTcb);
+ struct tcb *tcb);
static int et131x_send_packet(struct sk_buff *skb,
struct et131x_adapter *etdev);
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
+static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
/**
* et131x_tx_dma_memory_alloc
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
int desc_size = 0;
- TX_RING_t *tx_ring = &adapter->TxRing;
+ struct tx_ring *tx_ring = &adapter->tx_ring;
/* Allocate memory for the TCB's (Transmit Control Block) */
- adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
- GFP_ATOMIC | GFP_DMA);
- if (!adapter->TxRing.MpTcbMem) {
+ adapter->tx_ring.MpTcbMem = kcalloc(NUM_TCB, sizeof(struct tcb),
+ GFP_ATOMIC | GFP_DMA);
+ if (!adapter->tx_ring.MpTcbMem) {
dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
return -ENOMEM;
}
/* Allocate enough memory for the Tx descriptor ring, and allocate
* some extra so that the ring can be aligned on a 4k boundary.
*/
- desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
- tx_ring->pTxDescRingVa =
- (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
- &tx_ring->pTxDescRingPa);
- if (!adapter->TxRing.pTxDescRingVa) {
+ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
+ tx_ring->tx_desc_ring =
+ (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
+ &tx_ring->tx_desc_ring_pa);
+ if (!adapter->tx_ring.tx_desc_ring) {
dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
return -ENOMEM;
}
* are ever returned, make sure the high part is retrieved here before
* storing the adjusted address.
*/
- tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;
+ tx_ring->pTxDescRingAdjustedPa = tx_ring->tx_desc_ring_pa;
/* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
et131x_align_allocated_memory(adapter,
&tx_ring->pTxDescRingAdjustedPa,
&tx_ring->TxDescOffset, 0x0FFF);
- tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;
+ tx_ring->tx_desc_ring += tx_ring->TxDescOffset;
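The alignment step is plain integer arithmetic: the ring was over-allocated by 4095 bytes above, the physical address is rounded up to the next 4 KiB boundary, and the resulting byte offset is then applied to the virtual address as well. A minimal sketch of what et131x_align_allocated_memory() is assumed to do with the 0x0FFF mask (names here are illustrative, not the driver's):

/* Round *phys up to the boundary implied by mask and report the
 * number of bytes skipped; with mask = 0x0FFF this is a 4 KiB align.
 */
static void align_sketch(u64 *phys, u32 *offset, u64 mask)
{
	u64 aligned = (*phys + mask) & ~mask;

	*offset = (u32)(aligned - *phys);
	*phys = aligned;
}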
/* Allocate memory for the Tx status block */
tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
sizeof(TX_STATUS_BLOCK_t),
&tx_ring->pTxStatusPa);
- if (!adapter->TxRing.pTxStatusPa) {
+ if (!adapter->tx_ring.pTxStatusPa) {
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Tx status block\n");
return -ENOMEM;
tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
NIC_MIN_PACKET_SIZE,
&tx_ring->pTxDummyBlkPa);
- if (!adapter->TxRing.pTxDummyBlkPa) {
+ if (!adapter->tx_ring.pTxDummyBlkPa) {
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Tx dummy buffer\n");
return -ENOMEM;
{
int desc_size = 0;
- if (adapter->TxRing.pTxDescRingVa) {
+ if (adapter->tx_ring.tx_desc_ring) {
/* Free memory relating to Tx rings here */
- adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
+ adapter->tx_ring.tx_desc_ring -= adapter->tx_ring.TxDescOffset;
- desc_size =
- (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
+ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
+ + 4096 - 1;
pci_free_consistent(adapter->pdev,
desc_size,
- adapter->TxRing.pTxDescRingVa,
- adapter->TxRing.pTxDescRingPa);
+ adapter->tx_ring.tx_desc_ring,
+ adapter->tx_ring.tx_desc_ring_pa);
- adapter->TxRing.pTxDescRingVa = NULL;
+ adapter->tx_ring.tx_desc_ring = NULL;
}
/* Free memory for the Tx status block */
- if (adapter->TxRing.pTxStatusVa) {
+ if (adapter->tx_ring.pTxStatusVa) {
pci_free_consistent(adapter->pdev,
sizeof(TX_STATUS_BLOCK_t),
- adapter->TxRing.pTxStatusVa,
- adapter->TxRing.pTxStatusPa);
+ adapter->tx_ring.pTxStatusVa,
+ adapter->tx_ring.pTxStatusPa);
- adapter->TxRing.pTxStatusVa = NULL;
+ adapter->tx_ring.pTxStatusVa = NULL;
}
/* Free memory for the dummy buffer */
- if (adapter->TxRing.pTxDummyBlkVa) {
+ if (adapter->tx_ring.pTxDummyBlkVa) {
pci_free_consistent(adapter->pdev,
NIC_MIN_PACKET_SIZE,
- adapter->TxRing.pTxDummyBlkVa,
- adapter->TxRing.pTxDummyBlkPa);
+ adapter->tx_ring.pTxDummyBlkVa,
+ adapter->tx_ring.pTxDummyBlkPa);
- adapter->TxRing.pTxDummyBlkVa = NULL;
+ adapter->tx_ring.pTxDummyBlkVa = NULL;
}
- /* Free the memory for MP_TCB structures */
- kfree(adapter->TxRing.MpTcbMem);
+ /* Free the memory for the tcb structures */
+ kfree(adapter->tx_ring.MpTcbMem);
}
/**
struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;
/* Load the hardware with the start of the transmit descriptor ring. */
- writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
+ writel((u32) (etdev->tx_ring.pTxDescRingAdjustedPa >> 32),
&txdma->pr_base_hi);
- writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
+ writel((u32) etdev->tx_ring.pTxDescRingAdjustedPa,
&txdma->pr_base_lo);
/* Initialise the transmit DMA engine */
* storing the adjusted address.
*/
writel(0, &txdma->dma_wb_base_hi);
- writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);
+ writel(etdev->tx_ring.pTxStatusPa, &txdma->dma_wb_base_lo);
- memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
+ memset(etdev->tx_ring.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
writel(0, &txdma->service_request);
- etdev->TxRing.txDmaReadyToSend = 0;
+ etdev->tx_ring.txDmaReadyToSend = 0;
}
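txDmaReadyToSend is not a plain array index: throughout this file it is a 10-bit descriptor offset plus one extra wrap bit, so producer and consumer can tell a full ring from an empty one. The helpers used below (INDEX10, add_10bit, ET_DMA10_MASK, ET_DMA10_WRAP) are consistent with this sketch, reconstructed from their uses in this file rather than quoted from the driver headers:

#define ET_DMA10_MASK	0x03FF	/* low 10 bits: descriptor index */
#define ET_DMA10_WRAP	0x0400	/* toggles each time the ring wraps */
#define INDEX10(x)	((x) & ET_DMA10_MASK)

/* Advance a 10-bit ring index, leaving the wrap bit untouched;
 * toggling the wrap bit is done explicitly by the callers.
 */
static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}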
/**
*/
void et131x_init_send(struct et131x_adapter *adapter)
{
- PMP_TCB pMpTcb;
- uint32_t TcbCount;
- TX_RING_t *tx_ring;
+ struct tcb *tcb;
+ u32 count;
+ struct tx_ring *tx_ring;
/* Setup some convenience pointers */
- tx_ring = &adapter->TxRing;
- pMpTcb = adapter->TxRing.MpTcbMem;
+ tx_ring = &adapter->tx_ring;
+ tcb = adapter->tx_ring.MpTcbMem;
- tx_ring->TCBReadyQueueHead = pMpTcb;
+ tx_ring->TCBReadyQueueHead = tcb;
/* Go through and set up each TCB */
- for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
- memset(pMpTcb, 0, sizeof(MP_TCB));
+ for (count = 0; count < NUM_TCB; count++) {
+ memset(tcb, 0, sizeof(struct tcb));
/* Set the link pointer in HW TCB to the next TCB in the
* chain. If this is the last TCB in the chain, also set the
* tail pointer.
*/
- if (TcbCount < NUM_TCB - 1) {
- pMpTcb->Next = pMpTcb + 1;
+ if (count < NUM_TCB - 1) {
+ tcb->Next = tcb + 1;
} else {
- tx_ring->TCBReadyQueueTail = pMpTcb;
- pMpTcb->Next = (PMP_TCB) NULL;
+ tx_ring->TCBReadyQueueTail = tcb;
+ tcb->Next = NULL;
}
- pMpTcb++;
+ tcb++;
}
/* Curr send queue should now be empty */
- tx_ring->CurrSendHead = (PMP_TCB) NULL;
- tx_ring->CurrSendTail = (PMP_TCB) NULL;
+ tx_ring->CurrSendHead = NULL;
+ tx_ring->CurrSendTail = NULL;
- INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
+ INIT_LIST_HEAD(&adapter->tx_ring.SendWaitQueue);
}
/**
*/
/* Queue is not empty or TCB is not available */
- if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
+ if (!list_empty(&etdev->tx_ring.SendWaitQueue) ||
MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
/* NOTE: If there's an error on send, no need to queue the
* packet under Linux; if we just send an error up to the
struct et131x_adapter *etdev)
{
int status = 0;
- PMP_TCB pMpTcb = NULL;
+ struct tcb *tcb = NULL;
uint16_t *shbufva;
unsigned long flags;
/* All packets must have at least a MAC address and a protocol type */
- if (skb->len < ETH_HLEN) {
+ if (skb->len < ETH_HLEN)
return -EIO;
- }
/* Get a TCB for this packet */
spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
- pMpTcb = etdev->TxRing.TCBReadyQueueHead;
+ tcb = etdev->tx_ring.TCBReadyQueueHead;
- if (pMpTcb == NULL) {
+ if (tcb == NULL) {
spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
return -ENOMEM;
}
- etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;
+ etdev->tx_ring.TCBReadyQueueHead = tcb->Next;
- if (etdev->TxRing.TCBReadyQueueHead == NULL)
- etdev->TxRing.TCBReadyQueueTail = NULL;
+ if (etdev->tx_ring.TCBReadyQueueHead == NULL)
+ etdev->tx_ring.TCBReadyQueueTail = NULL;
spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
- pMpTcb->PacketLength = skb->len;
- pMpTcb->Packet = skb;
+ tcb->PacketLength = skb->len;
+ tcb->Packet = skb;
if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
shbufva = (uint16_t *) skb->data;
if ((shbufva[0] == 0xffff) &&
(shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
- pMpTcb->Flags |= fMP_DEST_BROAD;
+ tcb->Flags |= fMP_DEST_BROAD;
} else if ((shbufva[0] & 0x3) == 0x0001) {
- pMpTcb->Flags |= fMP_DEST_MULTI;
+ tcb->Flags |= fMP_DEST_MULTI;
}
}
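The u16 compares above detect the all-ones broadcast address and the group bit in the first octet, and rely on a little-endian view of skb->data. As a sketch, the endian-safe equivalent using the standard <linux/etherdevice.h> helpers would be:

	/* Classify the destination MAC; ordering matters, since a
	 * broadcast address also tests true as multicast.
	 */
	if (is_broadcast_ether_addr(skb->data))
		tcb->Flags |= fMP_DEST_BROAD;
	else if (is_multicast_ether_addr(skb->data))
		tcb->Flags |= fMP_DEST_MULTI;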
- pMpTcb->Next = NULL;
+ tcb->Next = NULL;
/* Call the NIC specific send handler. */
if (status == 0)
- status = nic_send_packet(etdev, pMpTcb);
+ status = nic_send_packet(etdev, tcb);
if (status != 0) {
spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
- if (etdev->TxRing.TCBReadyQueueTail) {
- etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
+ if (etdev->tx_ring.TCBReadyQueueTail) {
+ etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
} else {
/* Apparently ready Q is empty. */
- etdev->TxRing.TCBReadyQueueHead = pMpTcb;
+ etdev->tx_ring.TCBReadyQueueHead = tcb;
}
- etdev->TxRing.TCBReadyQueueTail = pMpTcb;
+ etdev->tx_ring.TCBReadyQueueTail = tcb;
spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
return status;
}
- WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
+ WARN_ON(etdev->tx_ring.nBusySend > NUM_TCB);
return 0;
}
/**
* nic_send_packet - NIC specific send handler for version B silicon.
* @etdev: pointer to our adapter
- * @pMpTcb: pointer to MP_TCB
+ * @tcb: pointer to struct tcb
*
* Returns 0 or errno.
*/
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
+static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
{
- uint32_t loopIndex;
- TX_DESC_ENTRY_t CurDesc[24];
- uint32_t FragmentNumber = 0;
- uint32_t thiscopy, remainder;
- struct sk_buff *pPacket = pMpTcb->Packet;
- uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
- struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
+ u32 i;
+ struct tx_desc desc[24]; /* 24 x 16 byte */
+ u32 frag = 0;
+ u32 thiscopy, remainder;
+ struct sk_buff *skb = tcb->Packet;
+ u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
+ struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
unsigned long flags;
/* Part of the optimizations of this send routine restrict us to
* number of fragments. If needed, we can call this function,
* although it is less efficient.
*/
- if (FragListCount > 23) {
+ if (nr_frags > 23)
return -EIO;
- }
- memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));
+ memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
- for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
+ for (i = 0; i < nr_frags; i++) {
/* If there is something in this element, lets get a
* descriptor from the ring and get the necessary data
*/
- if (loopIndex == 0) {
+ if (i == 0) {
/* If the fragments are smaller than a standard MTU,
* then map them to a single descriptor in the Tx
* Desc ring. However, if they're larger, as is
* This will work until we determine why the hardware
* doesn't seem to like large fragments.
*/
- if ((pPacket->len - pPacket->data_len) <= 1514) {
- CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+ if ((skb->len - skb->data_len) <= 1514) {
+ desc[frag].addr_hi = 0;
/* Low 16bits are length, high is vlan and
unused currently so zero */
- CurDesc[FragmentNumber].word2 =
- pPacket->len - pPacket->data_len;
+ desc[frag].len_vlan =
+ skb->len - skb->data_len;
/* NOTE: Here, the dma_addr_t returned from
* pci_map_single() is implicitly cast as a
- * uint32_t. Although dma_addr_t can be
+ * u32. Although dma_addr_t can be
* 64-bit, the address returned by
* pci_map_single() is always 32-bit
* addressable (as defined by the pci/dma
* subsystem)
*/
- CurDesc[FragmentNumber++].DataBufferPtrLow =
+ desc[frag++].addr_lo =
pci_map_single(etdev->pdev,
- pPacket->data,
- pPacket->len -
- pPacket->data_len,
+ skb->data,
+ skb->len -
+ skb->data_len,
PCI_DMA_TODEVICE);
} else {
- CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
- CurDesc[FragmentNumber].word2 =
- (pPacket->len - pPacket->data_len) / 2;
+ desc[frag].addr_hi = 0;
+ desc[frag].len_vlan =
+ (skb->len - skb->data_len) / 2;
/* NOTE: Here, the dma_addr_t returned from
* pci_map_single() is implicitly cast as a
- * uint32_t. Although dma_addr_t can be
+ * u32. Although dma_addr_t can be
* 64-bit, the address returned by
* pci_map_single() is always 32-bit
* addressable (as defined by the pci/dma
* subsystem)
*/
- CurDesc[FragmentNumber++].DataBufferPtrLow =
+ desc[frag++].addr_lo =
pci_map_single(etdev->pdev,
- pPacket->data,
- ((pPacket->len -
- pPacket->data_len) / 2),
+ skb->data,
+ ((skb->len -
+ skb->data_len) / 2),
PCI_DMA_TODEVICE);
- CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+ desc[frag].addr_hi = 0;
- CurDesc[FragmentNumber].word2 =
- (pPacket->len - pPacket->data_len) / 2;
+ desc[frag].len_vlan =
+ (skb->len - skb->data_len) / 2;
/* NOTE: Here, the dma_addr_t returned from
* pci_map_single() is implicitly cast as a
- * uint32_t. Although dma_addr_t can be
+ * u32. Although dma_addr_t can be
* 64-bit, the address returned by
* pci_map_single() is always 32-bit
* addressable (as defined by the pci/dma
* subsystem)
*/
- CurDesc[FragmentNumber++].DataBufferPtrLow =
+ desc[frag++].addr_lo =
pci_map_single(etdev->pdev,
- pPacket->data +
- ((pPacket->len -
- pPacket->data_len) / 2),
- ((pPacket->len -
- pPacket->data_len) / 2),
+ skb->data +
+ ((skb->len -
+ skb->data_len) / 2),
+ ((skb->len -
+ skb->data_len) / 2),
PCI_DMA_TODEVICE);
}
} else {
- CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
- CurDesc[FragmentNumber].word2 =
- pFragList[loopIndex - 1].size;
+ desc[frag].addr_hi = 0;
+ desc[frag].len_vlan =
+ frags[i - 1].size;
/* NOTE: Here, the dma_addr_t returned from
- * pci_map_page() is implicitly cast as a uint32_t.
+ * pci_map_page() is implicitly cast as a u32.
* Although dma_addr_t can be 64-bit, the address
* returned by pci_map_page() is always 32-bit
* addressable (as defined by the pci/dma subsystem)
*/
- CurDesc[FragmentNumber++].DataBufferPtrLow =
+ desc[frag++].addr_lo =
pci_map_page(etdev->pdev,
- pFragList[loopIndex - 1].page,
- pFragList[loopIndex - 1].page_offset,
- pFragList[loopIndex - 1].size,
+ frags[i - 1].page,
+ frags[i - 1].page_offset,
+ frags[i - 1].size,
PCI_DMA_TODEVICE);
}
}
- if (FragmentNumber == 0)
+ if (frag == 0)
return -EIO;
if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
- if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
+ if (++etdev->tx_ring.TxPacketsSinceLastinterrupt ==
PARM_TX_NUM_BUFS_DEF) {
/* Last element & Interrupt flag */
- CurDesc[FragmentNumber - 1].word3 = 0x5;
- etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
+ desc[frag - 1].flags = 0x5;
+ etdev->tx_ring.TxPacketsSinceLastinterrupt = 0;
} else { /* Last element */
- CurDesc[FragmentNumber - 1].word3 = 0x1;
+ desc[frag - 1].flags = 0x1;
}
} else {
- CurDesc[FragmentNumber - 1].word3 = 0x5;
+ desc[frag - 1].flags = 0x5;
}
- CurDesc[0].word3 |= 2; /* First element flag */
+ desc[0].flags |= 2; /* First element flag */
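From the values used just above, the descriptor flag bits decode as follows (bit names inferred from usage, not taken from the register spec):

/* Inferred tx_desc.flags encoding:
 *   0x1  last descriptor of a packet
 *   0x2  first descriptor of a packet
 *   0x4  interrupt on completion
 * A single-fragment packet therefore carries 0x3, or 0x7 when the
 * interrupt-coalescing counter elects to raise an interrupt.
 */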
- pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
- pMpTcb->PacketStaleCount = 0;
+ tcb->WrIndexStart = etdev->tx_ring.txDmaReadyToSend;
+ tcb->PacketStaleCount = 0;
spin_lock_irqsave(&etdev->SendHWLock, flags);
thiscopy = NUM_DESC_PER_RING_TX -
- INDEX10(etdev->TxRing.txDmaReadyToSend);
+ INDEX10(etdev->tx_ring.txDmaReadyToSend);
- if (thiscopy >= FragmentNumber) {
+ if (thiscopy >= frag) {
remainder = 0;
- thiscopy = FragmentNumber;
+ thiscopy = frag;
} else {
- remainder = FragmentNumber - thiscopy;
+ remainder = frag - thiscopy;
}
- memcpy(etdev->TxRing.pTxDescRingVa +
- INDEX10(etdev->TxRing.txDmaReadyToSend), CurDesc,
- sizeof(TX_DESC_ENTRY_t) * thiscopy);
+ memcpy(etdev->tx_ring.tx_desc_ring +
+ INDEX10(etdev->tx_ring.txDmaReadyToSend), desc,
+ sizeof(struct tx_desc) * thiscopy);
- add_10bit(&etdev->TxRing.txDmaReadyToSend, thiscopy);
+ add_10bit(&etdev->tx_ring.txDmaReadyToSend, thiscopy);
- if (INDEX10(etdev->TxRing.txDmaReadyToSend)== 0 ||
- INDEX10(etdev->TxRing.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
- etdev->TxRing.txDmaReadyToSend &= ~ET_DMA10_MASK;
- etdev->TxRing.txDmaReadyToSend ^= ET_DMA10_WRAP;
+ if (INDEX10(etdev->tx_ring.txDmaReadyToSend) == 0 ||
+ INDEX10(etdev->tx_ring.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
+ etdev->tx_ring.txDmaReadyToSend &= ~ET_DMA10_MASK;
+ etdev->tx_ring.txDmaReadyToSend ^= ET_DMA10_WRAP;
}
if (remainder) {
- memcpy(etdev->TxRing.pTxDescRingVa,
- CurDesc + thiscopy,
- sizeof(TX_DESC_ENTRY_t) * remainder);
+ memcpy(etdev->tx_ring.tx_desc_ring,
+ desc + thiscopy,
+ sizeof(struct tx_desc) * remainder);
- add_10bit(&etdev->TxRing.txDmaReadyToSend, remainder);
+ add_10bit(&etdev->tx_ring.txDmaReadyToSend, remainder);
}
- if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0) {
- if (etdev->TxRing.txDmaReadyToSend)
- pMpTcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
+ if (INDEX10(etdev->tx_ring.txDmaReadyToSend) == 0) {
+ if (etdev->tx_ring.txDmaReadyToSend)
+ tcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
else
- pMpTcb->WrIndex= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
+ tcb->WrIndex = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
} else
- pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend - 1;
+ tcb->WrIndex = etdev->tx_ring.txDmaReadyToSend - 1;
spin_lock(&etdev->TCBSendQLock);
- if (etdev->TxRing.CurrSendTail)
- etdev->TxRing.CurrSendTail->Next = pMpTcb;
+ if (etdev->tx_ring.CurrSendTail)
+ etdev->tx_ring.CurrSendTail->Next = tcb;
else
- etdev->TxRing.CurrSendHead = pMpTcb;
+ etdev->tx_ring.CurrSendHead = tcb;
- etdev->TxRing.CurrSendTail = pMpTcb;
+ etdev->tx_ring.CurrSendTail = tcb;
- WARN_ON(pMpTcb->Next != NULL);
+ WARN_ON(tcb->Next != NULL);
- etdev->TxRing.nBusySend++;
+ etdev->tx_ring.nBusySend++;
spin_unlock(&etdev->TCBSendQLock);
/* Write the new write pointer back to the device. */
- writel(etdev->TxRing.txDmaReadyToSend,
+ writel(etdev->tx_ring.txDmaReadyToSend,
&etdev->regs->txdma.service_request);
/* For Gig only, we use Tx Interrupt coalescing. Enable the software
/**
- * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
+ * et131x_free_send_packet - Recycle a struct tcb
* @etdev: pointer to our adapter
- * @pMpTcb: pointer to MP_TCB
+ * @tcb: pointer to struct tcb
*
+ * Complete the packet if necessary
* Assumption - Send spinlock has been acquired
*/
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
- PMP_TCB pMpTcb)
+ struct tcb *tcb)
{
unsigned long flags;
- TX_DESC_ENTRY_t *desc = NULL;
+ struct tx_desc *desc = NULL;
struct net_device_stats *stats = &etdev->net_stats;
- if (pMpTcb->Flags & fMP_DEST_BROAD)
+ if (tcb->Flags & fMP_DEST_BROAD)
atomic_inc(&etdev->Stats.brdcstxmt);
- else if (pMpTcb->Flags & fMP_DEST_MULTI)
+ else if (tcb->Flags & fMP_DEST_MULTI)
atomic_inc(&etdev->Stats.multixmt);
else
atomic_inc(&etdev->Stats.unixmt);
- if (pMpTcb->Packet) {
- stats->tx_bytes += pMpTcb->Packet->len;
+ if (tcb->Packet) {
+ stats->tx_bytes += tcb->Packet->len;
/* Iterate through the TX descriptors on the ring
* corresponding to this packet and unmap the fragments
* they point to
*/
do {
- desc =
- (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
- INDEX10(pMpTcb->WrIndexStart));
+ desc = etdev->tx_ring.tx_desc_ring +
+ INDEX10(tcb->WrIndexStart);
pci_unmap_single(etdev->pdev,
- desc->DataBufferPtrLow,
- desc->word2, PCI_DMA_TODEVICE);
+ desc->addr_lo,
+ desc->len_vlan, PCI_DMA_TODEVICE);
- add_10bit(&pMpTcb->WrIndexStart, 1);
- if (INDEX10(pMpTcb->WrIndexStart) >=
+ add_10bit(&tcb->WrIndexStart, 1);
+ if (INDEX10(tcb->WrIndexStart) >=
NUM_DESC_PER_RING_TX) {
- pMpTcb->WrIndexStart &= ~ET_DMA10_MASK;
- pMpTcb->WrIndexStart ^= ET_DMA10_WRAP;
+ tcb->WrIndexStart &= ~ET_DMA10_MASK;
+ tcb->WrIndexStart ^= ET_DMA10_WRAP;
}
- } while (desc != (etdev->TxRing.pTxDescRingVa +
- INDEX10(pMpTcb->WrIndex)));
+ } while (desc != (etdev->tx_ring.tx_desc_ring +
+ INDEX10(tcb->WrIndex)));
- dev_kfree_skb_any(pMpTcb->Packet);
+ dev_kfree_skb_any(tcb->Packet);
}
- memset(pMpTcb, 0, sizeof(MP_TCB));
+ memset(tcb, 0, sizeof(struct tcb));
/* Add the TCB to the Ready Q */
spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
etdev->Stats.opackets++;
- if (etdev->TxRing.TCBReadyQueueTail) {
- etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
+ if (etdev->tx_ring.TCBReadyQueueTail) {
+ etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
} else {
/* Apparently ready Q is empty. */
- etdev->TxRing.TCBReadyQueueHead = pMpTcb;
+ etdev->tx_ring.TCBReadyQueueHead = tcb;
}
- etdev->TxRing.TCBReadyQueueTail = pMpTcb;
+ etdev->tx_ring.TCBReadyQueueTail = tcb;
spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
- WARN_ON(etdev->TxRing.nBusySend < 0);
+ WARN_ON(etdev->tx_ring.nBusySend < 0);
}
/**
*/
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
- PMP_TCB pMpTcb;
+ struct tcb *tcb;
struct list_head *entry;
unsigned long flags;
- uint32_t FreeCounter = 0;
+ u32 freed = 0;
- while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
+ while (!list_empty(&etdev->tx_ring.SendWaitQueue)) {
spin_lock_irqsave(&etdev->SendWaitLock, flags);
- etdev->TxRing.nWaitSend--;
+ etdev->tx_ring.nWaitSend--;
spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
- entry = etdev->TxRing.SendWaitQueue.next;
+ entry = etdev->tx_ring.SendWaitQueue.next;
}
- etdev->TxRing.nWaitSend = 0;
+ etdev->tx_ring.nWaitSend = 0;
/* Any packets being sent? Check the first TCB on the send list */
spin_lock_irqsave(&etdev->TCBSendQLock, flags);
- pMpTcb = etdev->TxRing.CurrSendHead;
+ tcb = etdev->tx_ring.CurrSendHead;
- while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
- PMP_TCB pNext = pMpTcb->Next;
+ while ((tcb != NULL) && (freed < NUM_TCB)) {
+ struct tcb *pNext = tcb->Next;
- etdev->TxRing.CurrSendHead = pNext;
+ etdev->tx_ring.CurrSendHead = pNext;
if (pNext == NULL)
- etdev->TxRing.CurrSendTail = NULL;
+ etdev->tx_ring.CurrSendTail = NULL;
- etdev->TxRing.nBusySend--;
+ etdev->tx_ring.nBusySend--;
spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
- FreeCounter++;
- et131x_free_send_packet(etdev, pMpTcb);
+ freed++;
+ et131x_free_send_packet(etdev, tcb);
spin_lock_irqsave(&etdev->TCBSendQLock, flags);
- pMpTcb = etdev->TxRing.CurrSendHead;
+ tcb = etdev->tx_ring.CurrSendHead;
}
- WARN_ON(FreeCounter == NUM_TCB);
+ WARN_ON(freed == NUM_TCB);
spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
- etdev->TxRing.nBusySend = 0;
+ etdev->tx_ring.nBusySend = 0;
}
/**
static void et131x_update_tcb_list(struct et131x_adapter *etdev)
{
unsigned long flags;
- u32 ServiceComplete;
- PMP_TCB pMpTcb;
+ u32 serviced;
+ struct tcb *tcb;
u32 index;
- ServiceComplete = readl(&etdev->regs->txdma.NewServiceComplete);
- index = INDEX10(ServiceComplete);
+ serviced = readl(&etdev->regs->txdma.NewServiceComplete);
+ index = INDEX10(serviced);
/* Has the ring wrapped? Process any descriptors that do not have
* the same "wrap" indicator as the current completion indicator
*/
spin_lock_irqsave(&etdev->TCBSendQLock, flags);
- pMpTcb = etdev->TxRing.CurrSendHead;
+ tcb = etdev->tx_ring.CurrSendHead;
- while (pMpTcb &&
- ((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP) &&
- index < INDEX10(pMpTcb->WrIndex)) {
- etdev->TxRing.nBusySend--;
- etdev->TxRing.CurrSendHead = pMpTcb->Next;
- if (pMpTcb->Next == NULL)
- etdev->TxRing.CurrSendTail = NULL;
+ while (tcb &&
+ ((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP) &&
+ index < INDEX10(tcb->WrIndex)) {
+ etdev->tx_ring.nBusySend--;
+ etdev->tx_ring.CurrSendHead = tcb->Next;
+ if (tcb->Next == NULL)
+ etdev->tx_ring.CurrSendTail = NULL;
spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
- et131x_free_send_packet(etdev, pMpTcb);
+ et131x_free_send_packet(etdev, tcb);
spin_lock_irqsave(&etdev->TCBSendQLock, flags);
/* Goto the next packet */
- pMpTcb = etdev->TxRing.CurrSendHead;
+ tcb = etdev->tx_ring.CurrSendHead;
}
- while (pMpTcb &&
- !((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP)
- && index > (pMpTcb->WrIndex & ET_DMA10_MASK)) {
- etdev->TxRing.nBusySend--;
- etdev->TxRing.CurrSendHead = pMpTcb->Next;
- if (pMpTcb->Next == NULL)
- etdev->TxRing.CurrSendTail = NULL;
+ while (tcb &&
+ !((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP)
+ && index > (tcb->WrIndex & ET_DMA10_MASK)) {
+ etdev->tx_ring.nBusySend--;
+ etdev->tx_ring.CurrSendHead = tcb->Next;
+ if (tcb->Next == NULL)
+ etdev->tx_ring.CurrSendTail = NULL;
spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
- et131x_free_send_packet(etdev, pMpTcb);
+ et131x_free_send_packet(etdev, tcb);
spin_lock_irqsave(&etdev->TCBSendQLock, flags);
/* Goto the next packet */
- pMpTcb = etdev->TxRing.CurrSendHead;
+ tcb = etdev->tx_ring.CurrSendHead;
}
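The two loops differ only in how the wrap bit is treated: a TCB is complete either when its wrap bit differs from the hardware's serviced index and the serviced offset is still below it, or when the wrap bits agree and the serviced offset has moved past it. Folded into one predicate, as an illustrative sketch:

/* Hypothetical helper equivalent to the paired loop conditions above */
static inline int tcb_serviced(u32 serviced, u32 wrindex)
{
	if ((serviced ^ wrindex) & ET_DMA10_WRAP)
		return INDEX10(serviced) < INDEX10(wrindex);
	return INDEX10(serviced) > INDEX10(wrindex);
}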
/* Wake up the queue when we hit a low-water mark */
- if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
+ if (etdev->tx_ring.nBusySend <= (NUM_TCB / 3))
netif_wake_queue(etdev->netdev);
spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
spin_lock_irqsave(&etdev->SendWaitLock, flags);
- while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
+ while (!list_empty(&etdev->tx_ring.SendWaitQueue) &&
MP_TCB_RESOURCES_AVAILABLE(etdev)) {
struct list_head *entry;
- entry = etdev->TxRing.SendWaitQueue.next;
+ entry = etdev->tx_ring.SendWaitQueue.next;
- etdev->TxRing.nWaitSend--;
+ etdev->tx_ring.nWaitSend--;
}
spin_unlock_irqrestore(&etdev->SendWaitLock, flags);