#include "et1310_rx.h"
#include "et131x.h"
-static inline u32 bump_fbr(u32 *fbr, u32 limit)
+static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
- u32 v = *fbr;
- v++;
+ u32 tmp_free_buff_ring = *free_buff_ring;
+ tmp_free_buff_ring++;
/* This works for all cases where limit < 1024. The 1023 case
works because 1023++ is 1024 which means the if condition is not
taken but the carry of the bit into the wrap bit toggles the wrap
value correctly */
- if ((v & ET_DMA10_MASK) > limit) {
- v &= ~ET_DMA10_MASK;
- v ^= ET_DMA10_WRAP;
+ if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
+ tmp_free_buff_ring &= ~ET_DMA10_MASK;
+ tmp_free_buff_ring ^= ET_DMA10_WRAP;
}
/* For the 1023 case */
- v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
- *fbr = v;
- return v;
+ tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
+ *free_buff_ring = tmp_free_buff_ring;
+ return tmp_free_buff_ring;
}
/**
{
u32 i, j;
u32 bufsize;
- u32 pktStatRingSize, FBRChunkSize;
+ u32 pktstat_ringsize, fbr_chunksize;
struct rx_ring *rx_ring;
/* Setup some convenience pointers */
for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
i++) {
- u64 Fbr1Offset;
- u64 Fbr1TempPa;
- u32 Fbr1Align;
+ u64 fbr1_offset;
+ u64 fbr1_tmp_physaddr;
+ u32 fbr1_align;
/* This code allocates an area of memory big enough for N
* free buffers + (buffer_size - 1) so that the buffers can
* reduce this overhead.
*/
if (rx_ring->Fbr1BufferSize > 4096)
- Fbr1Align = 4096;
+ fbr1_align = 4096;
else
- Fbr1Align = rx_ring->Fbr1BufferSize;
+ fbr1_align = rx_ring->Fbr1BufferSize;
- FBRChunkSize =
- (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
+ fbr_chunksize =
+ (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + fbr1_align - 1;
rx_ring->Fbr1MemVa[i] =
- pci_alloc_consistent(adapter->pdev, FBRChunkSize,
+ pci_alloc_consistent(adapter->pdev, fbr_chunksize,
&rx_ring->Fbr1MemPa[i]);
if (!rx_ring->Fbr1MemVa[i]) {
}
/* See NOTE in "Save Physical Address" comment above */
- Fbr1TempPa = rx_ring->Fbr1MemPa[i];
+ fbr1_tmp_physaddr = rx_ring->Fbr1MemPa[i];
et131x_align_allocated_memory(adapter,
- &Fbr1TempPa,
- &Fbr1Offset, (Fbr1Align - 1));
+ &fbr1_tmp_physaddr,
+ &fbr1_offset, (fbr1_align - 1));
for (j = 0; j < FBR_CHUNKS; j++) {
u32 index = (i * FBR_CHUNKS) + j;
*/
rx_ring->fbr[1]->virt[index] =
(u8 *) rx_ring->Fbr1MemVa[i] +
- (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
+ (j * rx_ring->Fbr1BufferSize) + fbr1_offset;
/* now store the physical address in the descriptor
* so the device can access it
*/
rx_ring->fbr[1]->bus_high[index] =
- (u32) (Fbr1TempPa >> 32);
- rx_ring->fbr[1]->bus_low[index] = (u32) Fbr1TempPa;
+ (u32) (fbr1_tmp_physaddr >> 32);
+ rx_ring->fbr[1]->bus_low[index] =
+ (u32) fbr1_tmp_physaddr;
- Fbr1TempPa += rx_ring->Fbr1BufferSize;
+ fbr1_tmp_physaddr += rx_ring->Fbr1BufferSize;
rx_ring->fbr[1]->buffer1[index] =
rx_ring->fbr[1]->virt[index];
/* Same for FBR0 (if in use) */
for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
i++) {
- u64 Fbr0Offset;
- u64 Fbr0TempPa;
+ u64 fbr0_offset;
+ u64 fbr0_tmp_physaddr;
- FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
+ fbr_chunksize =
+ ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
rx_ring->Fbr0MemVa[i] =
- pci_alloc_consistent(adapter->pdev, FBRChunkSize,
+ pci_alloc_consistent(adapter->pdev, fbr_chunksize,
&rx_ring->Fbr0MemPa[i]);
if (!rx_ring->Fbr0MemVa[i]) {
}
/* See NOTE in "Save Physical Address" comment above */
- Fbr0TempPa = rx_ring->Fbr0MemPa[i];
+ fbr0_tmp_physaddr = rx_ring->Fbr0MemPa[i];
et131x_align_allocated_memory(adapter,
- &Fbr0TempPa,
- &Fbr0Offset,
+ &fbr0_tmp_physaddr,
+ &fbr0_offset,
rx_ring->Fbr0BufferSize - 1);
for (j = 0; j < FBR_CHUNKS; j++) {
rx_ring->fbr[0]->virt[index] =
(u8 *) rx_ring->Fbr0MemVa[i] +
- (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
+ (j * rx_ring->Fbr0BufferSize) + fbr0_offset;
rx_ring->fbr[0]->bus_high[index] =
- (u32) (Fbr0TempPa >> 32);
- rx_ring->fbr[0]->bus_low[index] = (u32) Fbr0TempPa;
+ (u32) (fbr0_tmp_physaddr >> 32);
+ rx_ring->fbr[0]->bus_low[index] =
+ (u32) fbr0_tmp_physaddr;
- Fbr0TempPa += rx_ring->Fbr0BufferSize;
+ fbr0_tmp_physaddr += rx_ring->Fbr0BufferSize;
rx_ring->fbr[0]->buffer1[index] =
rx_ring->fbr[0]->virt[index];
#endif
/* Allocate an area of memory for FIFO of Packet Status ring entries */
- pktStatRingSize =
+ pktstat_ringsize =
sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
- pktStatRingSize,
+ pktstat_ringsize,
&rx_ring->pPSRingPa);
if (!rx_ring->pPSRingVa) {
"Cannot alloc memory for Packet Status Ring\n");
return -ENOMEM;
}
- printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
+ printk(KERN_INFO "Packet Status Ring %lx\n",
+ (unsigned long) rx_ring->pPSRingPa);
/*
* NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
{
u32 index;
u32 bufsize;
- u32 pktStatRingSize;
+ u32 pktstat_ringsize;
struct rfd *rfd;
struct rx_ring *rx_ring;
for (index = 0; index <
(rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
if (rx_ring->Fbr1MemVa[index]) {
- u32 Fbr1Align;
+ u32 fbr1_align;
if (rx_ring->Fbr1BufferSize > 4096)
- Fbr1Align = 4096;
+ fbr1_align = 4096;
else
- Fbr1Align = rx_ring->Fbr1BufferSize;
+ fbr1_align = rx_ring->Fbr1BufferSize;
bufsize =
(rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
- Fbr1Align - 1;
+ fbr1_align - 1;
pci_free_consistent(adapter->pdev,
bufsize,
/* Free Packet Status Ring */
if (rx_ring->pPSRingVa) {
- pktStatRingSize =
+ pktstat_ringsize =
sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
- pci_free_consistent(adapter->pdev, pktStatRingSize,
+ pci_free_consistent(adapter->pdev, pktstat_ringsize,
rx_ring->pPSRingVa, rx_ring->pPSRingPa);
rx_ring->pPSRingVa = NULL;
}
/**
- * ConfigRxDmaRegs - Start of Rx_DMA init sequence
+ * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
* @etdev: pointer to our adapter structure
*/
-void ConfigRxDmaRegs(struct et131x_adapter *etdev)
+void et131x_config_rx_dma_regs(struct et131x_adapter *etdev)
{
struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
struct rx_ring *rx_local = &etdev->rx_ring;
}
/**
- * SetRxDmaTimer - Set the heartbeat timer according to line rate.
+ * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
* @etdev: pointer to our adapter structure
*/
-void SetRxDmaTimer(struct et131x_adapter *etdev)
+void et131x_set_rx_dma_timer(struct et131x_adapter *etdev)
{
/* For version B silicon, we do not use the RxDMA timer for 10 and 100
 * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
{
struct rx_ring *rx_local = &etdev->rx_ring;
struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
- u16 bi = rfd->bufferindex;
- u8 ri = rfd->ringindex;
+ u16 buff_index = rfd->bufferindex;
+ u8 ring_index = rfd->ringindex;
unsigned long flags;
/* We don't use any of the OOB data besides status. Otherwise, we
*/
if (
#ifdef USE_FBR0
- (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
+ (ring_index == 0 && buff_index < rx_local->Fbr0NumEntries) ||
#endif
- (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
+ (ring_index == 1 && buff_index < rx_local->Fbr1NumEntries)) {
spin_lock_irqsave(&etdev->FbrLock, flags);
- if (ri == 1) {
+ if (ring_index == 1) {
struct fbr_desc *next =
(struct fbr_desc *) (rx_local->pFbr1RingVa) +
INDEX10(rx_local->local_Fbr1_full);
* the PA / Buffer Index for the returned buffer into
* the oldest (next to be freed)FBR entry
*/
- next->addr_hi = rx_local->fbr[1]->bus_high[bi];
- next->addr_lo = rx_local->fbr[1]->bus_low[bi];
- next->word2 = bi;
+ next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
+ next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
+ next->word2 = buff_index;
- writel(bump_fbr(&rx_local->local_Fbr1_full,
+ writel(bump_free_buff_ring(&rx_local->local_Fbr1_full,
rx_local->Fbr1NumEntries - 1),
&rx_dma->fbr1_full_offset);
}
* the PA / Buffer Index for the returned buffer into
* the oldest (next to be freed) FBR entry
*/
- next->addr_hi = rx_local->fbr[0]->bus_high[bi];
- next->addr_lo = rx_local->fbr[0]->bus_low[bi];
- next->word2 = bi;
+ next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
+ next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
+ next->word2 = buff_index;
- writel(bump_fbr(&rx_local->local_Fbr0_full,
+ writel(bump_free_buff_ring(&rx_local->local_Fbr0_full,
rx_local->Fbr0NumEntries - 1),
&rx_dma->fbr0_full_offset);
}
spin_unlock_irqrestore(&etdev->FbrLock, flags);
} else {
dev_err(&etdev->pdev->dev,
- "NICReturnRFD illegal Buffer Index returned\n");
+ "%s illegal Buffer Index returned\n", __func__);
}
/* The processing on this RFD is done, so put it back on the tail of
u8 *buf;
unsigned long flags;
struct list_head *element;
- u8 rindex;
- u16 bindex;
+ u8 ring_index;
+ u16 buff_index;
u32 len;
u32 word0;
u32 word1;
* accurate
*/
len = psr->word1 & 0xFFFF;
- rindex = (psr->word1 >> 26) & 0x03;
- bindex = (psr->word1 >> 16) & 0x3FF;
+ ring_index = (psr->word1 >> 26) & 0x03;
+ buff_index = (psr->word1 >> 16) & 0x3FF;
word0 = psr->word0;
/* Indicate that we have used this PSR entry. */
&etdev->regs->rxdma.psr_full_offset);
#ifndef USE_FBR0
- if (rindex != 1)
+ if (ring_index != 1)
return NULL;
#endif
#ifdef USE_FBR0
- if (rindex > 1 ||
- (rindex == 0 &&
- bindex > rx_local->Fbr0NumEntries - 1) ||
- (rindex == 1 &&
- bindex > rx_local->Fbr1NumEntries - 1))
+ if (ring_index > 1 ||
+ (ring_index == 0 &&
+ buff_index > rx_local->Fbr0NumEntries - 1) ||
+ (ring_index == 1 &&
+ buff_index > rx_local->Fbr1NumEntries - 1))
#else
- if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1)
+ if (ring_index != 1 || buff_index > rx_local->Fbr1NumEntries - 1)
#endif
{
/* Illegal buffer or ring index cannot be used by S/W*/
"NICRxPkts PSR Entry %d indicates "
"length of %d and/or bad bi(%d)\n",
rx_local->local_psr_full & 0xFFF,
- len, bindex);
+ len, buff_index);
return NULL;
}
spin_unlock_irqrestore(&etdev->rcv_lock, flags);
- rfd->bufferindex = bindex;
- rfd->ringindex = rindex;
+ rfd->bufferindex = buff_index;
+ rfd->ringindex = ring_index;
/* In V1 silicon, there is a bug which screws up filtering of
* runt packets. Therefore runt packet filtering is disabled
if (len) {
if (etdev->ReplicaPhyLoopbk == 1) {
- buf = rx_local->fbr[rindex]->virt[bindex];
+ buf = rx_local->fbr[ring_index]->virt[buff_index];
if (memcmp(&buf[6], etdev->addr, ETH_ALEN) == 0) {
if (memcmp(&buf[42], "Replica packet",
if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
&& !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
&& !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
- buf = rx_local->fbr[rindex]->
- virt[bindex];
+ buf = rx_local->fbr[ring_index]->
+ virt[buff_index];
/* Loop through our list to see if the
* destination address of this packet
* matches one in our list.
*/
- for (i = 0;
- i < etdev->MCAddressCount;
- i++) {
- if (buf[0] ==
- etdev->MCList[i][0]
- && buf[1] ==
- etdev->MCList[i][1]
- && buf[2] ==
- etdev->MCList[i][2]
- && buf[3] ==
- etdev->MCList[i][3]
- && buf[4] ==
- etdev->MCList[i][4]
- && buf[5] ==
- etdev->MCList[i][5]) {
+ for (i = 0; i < etdev->MCAddressCount; i++) {
+ if (buf[0] == etdev->MCList[i][0]
+ && buf[1] == etdev->MCList[i][1]
+ && buf[2] == etdev->MCList[i][2]
+ && buf[3] == etdev->MCList[i][3]
+ && buf[4] == etdev->MCList[i][4]
+ && buf[5] == etdev->MCList[i][5]) {
break;
}
}
etdev->net_stats.rx_bytes += rfd->len;
memcpy(skb_put(skb, rfd->len),
- rx_local->fbr[rindex]->virt[bindex],
+ rx_local->fbr[ring_index]->virt[buff_index],
rfd->len);
skb->dev = etdev->netdev;
void et131x_reset_recv(struct et131x_adapter *etdev)
{
WARN_ON(list_empty(&etdev->rx_ring.RecvList));
-
}
/**