struct mac_info *mac_control = &nic->mac_control;
struct config_param *config = &nic->config;
- for (i = 0; i < config->tx_fifo_num; i++)
- spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
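+ /* take all tx fifo locks before updating the vlan group pointer */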
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+
+ spin_lock_irqsave(&fifo->tx_lock, flags[i]);
+ }
nic->vlgrp = grp;
- for (i = config->tx_fifo_num - 1; i >= 0; i--)
- spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
- flags[i]);
+
+ for (i = config->tx_fifo_num - 1; i >= 0; i--) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+
+ spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
+ }
}
/* Unregister the vlan */
struct mac_info *mac_control = &nic->mac_control;
struct config_param *config = &nic->config;
- for (i = 0; i < config->tx_fifo_num; i++)
- spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+
+ spin_lock_irqsave(&fifo->tx_lock, flags[i]);
+ }
if (nic->vlgrp)
vlan_group_set_device(nic->vlgrp, vid, NULL);
- for (i = config->tx_fifo_num - 1; i >= 0; i--)
- spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
- flags[i]);
+ for (i = config->tx_fifo_num - 1; i >= 0; i--) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+
+ spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
+ }
}
/*
mac_control = &nic->mac_control;
config = &nic->config;
-
- /* Allocation and initialization of TXDLs in FIOFs */
+ /* Allocation and initialization of TXDLs in FIFOs */
size = 0;
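+ /* total number of TxDs requested across all tx fifos */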
for (i = 0; i < config->tx_fifo_num; i++) {
- size += config->tx_cfg[i].fifo_len;
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ size += tx_cfg->fifo_len;
}
if (size > MAX_AVAILABLE_TXDS) {
DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
size = 0;
for (i = 0; i < config->tx_fifo_num; i++) {
- size = config->tx_cfg[i].fifo_len;
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ size = tx_cfg->fifo_len;
/*
* Legal values are from 2 to 8192
*/
lst_per_page = PAGE_SIZE / lst_size;
for (i = 0; i < config->tx_fifo_num; i++) {
- int fifo_len = config->tx_cfg[i].fifo_len;
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+ int fifo_len = tx_cfg->fifo_len;
int list_holder_size = fifo_len * sizeof(struct list_info_hold);
- mac_control->fifos[i].list_info = kzalloc(list_holder_size,
- GFP_KERNEL);
- if (!mac_control->fifos[i].list_info) {
+
+ fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
+ if (!fifo->list_info) {
DBG_PRINT(INFO_DBG,
"Malloc failed for list_info\n");
return -ENOMEM;
for (i = 0; i < config->tx_fifo_num; i++) {
- int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
- lst_per_page);
- mac_control->fifos[i].tx_curr_put_info.offset = 0;
- mac_control->fifos[i].tx_curr_put_info.fifo_len =
- config->tx_cfg[i].fifo_len - 1;
- mac_control->fifos[i].tx_curr_get_info.offset = 0;
- mac_control->fifos[i].tx_curr_get_info.fifo_len =
- config->tx_cfg[i].fifo_len - 1;
- mac_control->fifos[i].fifo_no = i;
- mac_control->fifos[i].nic = nic;
- mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
- mac_control->fifos[i].dev = dev;
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+ int page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
+
+ fifo->tx_curr_put_info.offset = 0;
+ fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
+ fifo->tx_curr_get_info.offset = 0;
+ fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
+ fifo->fifo_no = i;
+ fifo->nic = nic;
+ fifo->max_txds = MAX_SKB_FRAGS + 2;
+ fifo->dev = dev;
for (j = 0; j < page_num; j++) {
int k = 0;
}
while (k < lst_per_page) {
int l = (j * lst_per_page) + k;
- if (l == config->tx_cfg[i].fifo_len)
+ if (l == tx_cfg->fifo_len)
break;
- mac_control->fifos[i].list_info[l].list_virt_addr =
+ fifo->list_info[l].list_virt_addr =
tmp_v + (k * lst_size);
- mac_control->fifos[i].list_info[l].list_phy_addr =
+ fifo->list_info[l].list_phy_addr =
tmp_p + (k * lst_size);
k++;
}
}
for (i = 0; i < config->tx_fifo_num; i++) {
- size = config->tx_cfg[i].fifo_len;
- mac_control->fifos[i].ufo_in_band_v
- = kcalloc(size, sizeof(u64), GFP_KERNEL);
- if (!mac_control->fifos[i].ufo_in_band_v)
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ size = tx_cfg->fifo_len;
+ fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
+ if (!fifo->ufo_in_band_v)
return -ENOMEM;
mem_allocated += (size * sizeof(u64));
}
/* Allocation and initialization of RXDs in Rings */
size = 0;
for (i = 0; i < config->rx_ring_num; i++) {
- if (config->rx_cfg[i].num_rxd %
- (rxd_count[nic->rxd_mode] + 1)) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
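+ /* each ring's RxD count must be a whole multiple of RxDs per block */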
+ if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
- DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
- i);
+ DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
DBG_PRINT(ERR_DBG, "RxDs per Block");
return FAILURE;
}
- size += config->rx_cfg[i].num_rxd;
- mac_control->rings[i].block_count =
- config->rx_cfg[i].num_rxd /
+ size += rx_cfg->num_rxd;
+ ring->block_count = rx_cfg->num_rxd /
(rxd_count[nic->rxd_mode] + 1 );
- mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
- mac_control->rings[i].block_count;
+ ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
}
if (nic->rxd_mode == RXD_MODE_1)
size = (size * (sizeof(struct RxD1)));
size = (size * (sizeof(struct RxD3)));
for (i = 0; i < config->rx_ring_num; i++) {
- mac_control->rings[i].rx_curr_get_info.block_index = 0;
- mac_control->rings[i].rx_curr_get_info.offset = 0;
- mac_control->rings[i].rx_curr_get_info.ring_len =
- config->rx_cfg[i].num_rxd - 1;
- mac_control->rings[i].rx_curr_put_info.block_index = 0;
- mac_control->rings[i].rx_curr_put_info.offset = 0;
- mac_control->rings[i].rx_curr_put_info.ring_len =
- config->rx_cfg[i].num_rxd - 1;
- mac_control->rings[i].nic = nic;
- mac_control->rings[i].ring_no = i;
- mac_control->rings[i].lro = lro_enable;
-
- blk_cnt = config->rx_cfg[i].num_rxd /
- (rxd_count[nic->rxd_mode] + 1);
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ ring->rx_curr_get_info.block_index = 0;
+ ring->rx_curr_get_info.offset = 0;
+ ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
+ ring->rx_curr_put_info.block_index = 0;
+ ring->rx_curr_put_info.offset = 0;
+ ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
+ ring->nic = nic;
+ ring->ring_no = i;
+ ring->lro = lro_enable;
+
+ blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
/* Allocating all the Rx blocks */
for (j = 0; j < blk_cnt; j++) {
struct rx_block_info *rx_blocks;
int l;
- rx_blocks = &mac_control->rings[i].rx_blocks[j];
+ rx_blocks = &ring->rx_blocks[j];
size = SIZE_OF_BLOCK; //size is always page size
tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
&tmp_p_addr);
}
/* Interlinking all Rx Blocks */
for (j = 0; j < blk_cnt; j++) {
- tmp_v_addr =
- mac_control->rings[i].rx_blocks[j].block_virt_addr;
- tmp_v_addr_next =
- mac_control->rings[i].rx_blocks[(j + 1) %
- blk_cnt].block_virt_addr;
- tmp_p_addr =
- mac_control->rings[i].rx_blocks[j].block_dma_addr;
- tmp_p_addr_next =
- mac_control->rings[i].rx_blocks[(j + 1) %
- blk_cnt].block_dma_addr;
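+ /* the last block wraps back to the first, forming a circular chain */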
+ int next = (j + 1) % blk_cnt;
+ tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
+ tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
+ tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
+ tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
pre_rxd_blk->reserved_2_pNext_RxD_block =
* and the buffers as well.
*/
for (i = 0; i < config->rx_ring_num; i++) {
- blk_cnt = config->rx_cfg[i].num_rxd /
- (rxd_count[nic->rxd_mode]+ 1);
- mac_control->rings[i].ba =
- kmalloc((sizeof(struct buffAdd *) * blk_cnt),
- GFP_KERNEL);
- if (!mac_control->rings[i].ba)
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
+ ring->ba = kmalloc((sizeof(struct buffAdd *) * blk_cnt),
+ GFP_KERNEL);
+ if (!ring->ba)
return -ENOMEM;
mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
for (j = 0; j < blk_cnt; j++) {
int k = 0;
- mac_control->rings[i].ba[j] =
+ ring->ba[j] =
kmalloc((sizeof(struct buffAdd) *
(rxd_count[nic->rxd_mode] + 1)),
GFP_KERNEL);
- if (!mac_control->rings[i].ba[j])
+ if (!ring->ba[j])
return -ENOMEM;
mem_allocated += (sizeof(struct buffAdd) * \
(rxd_count[nic->rxd_mode] + 1));
while (k != rxd_count[nic->rxd_mode]) {
- ba = &mac_control->rings[i].ba[j][k];
+ ba = &ring->ba[j][k];
ba->ba_0_org = (void *) kmalloc
(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
lst_per_page = PAGE_SIZE / lst_size;
for (i = 0; i < config->tx_fifo_num; i++) {
- page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
- lst_per_page);
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
for (j = 0; j < page_num; j++) {
int mem_blks = (j * lst_per_page);
- if (!mac_control->fifos[i].list_info)
+ struct list_info_hold *fli;
+
+ if (!fifo->list_info)
return;
- if (!mac_control->fifos[i].list_info[mem_blks].
- list_virt_addr)
+
+ fli = &fifo->list_info[mem_blks];
+ if (!fli->list_virt_addr)
break;
pci_free_consistent(nic->pdev, PAGE_SIZE,
- mac_control->fifos[i].
- list_info[mem_blks].
- list_virt_addr,
- mac_control->fifos[i].
- list_info[mem_blks].
- list_phy_addr);
+ fli->list_virt_addr,
+ fli->list_phy_addr);
nic->mac_control.stats_info->sw_stat.mem_freed
+= PAGE_SIZE;
}
nic->mac_control.stats_info->sw_stat.mem_freed
+= PAGE_SIZE;
}
- kfree(mac_control->fifos[i].list_info);
+ kfree(fifo->list_info);
nic->mac_control.stats_info->sw_stat.mem_freed +=
(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
}
size = SIZE_OF_BLOCK;
for (i = 0; i < config->rx_ring_num; i++) {
- blk_cnt = mac_control->rings[i].block_count;
+ struct ring_info *ring = &mac_control->rings[i];
+
+ blk_cnt = ring->block_count;
for (j = 0; j < blk_cnt; j++) {
- tmp_v_addr = mac_control->rings[i].rx_blocks[j].
- block_virt_addr;
- tmp_p_addr = mac_control->rings[i].rx_blocks[j].
- block_dma_addr;
+ tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
+ tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
if (tmp_v_addr == NULL)
break;
pci_free_consistent(nic->pdev, size,
tmp_v_addr, tmp_p_addr);
nic->mac_control.stats_info->sw_stat.mem_freed += size;
- kfree(mac_control->rings[i].rx_blocks[j].rxds);
+ kfree(ring->rx_blocks[j].rxds);
nic->mac_control.stats_info->sw_stat.mem_freed +=
( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
}
if (nic->rxd_mode == RXD_MODE_3B) {
/* Freeing buffer storage addresses in 2BUFF mode. */
for (i = 0; i < config->rx_ring_num; i++) {
- blk_cnt = config->rx_cfg[i].num_rxd /
- (rxd_count[nic->rxd_mode] + 1);
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
for (j = 0; j < blk_cnt; j++) {
int k = 0;
- if (!mac_control->rings[i].ba[j])
+ if (!ring->ba[j])
continue;
while (k != rxd_count[nic->rxd_mode]) {
- struct buffAdd *ba =
- &mac_control->rings[i].ba[j][k];
+ struct buffAdd *ba = &ring->ba[j][k];
kfree(ba->ba_0_org);
nic->mac_control.stats_info->sw_stat.\
mem_freed += (BUF0_LEN + ALIGN_SIZE);
mem_freed += (BUF1_LEN + ALIGN_SIZE);
k++;
}
- kfree(mac_control->rings[i].ba[j]);
+ kfree(ring->ba[j]);
nic->mac_control.stats_info->sw_stat.mem_freed +=
(sizeof(struct buffAdd) *
(rxd_count[nic->rxd_mode] + 1));
}
- kfree(mac_control->rings[i].ba);
+ kfree(ring->ba);
nic->mac_control.stats_info->sw_stat.mem_freed +=
(sizeof(struct buffAdd *) * blk_cnt);
}
}
for (i = 0; i < nic->config.tx_fifo_num; i++) {
- if (mac_control->fifos[i].ufo_in_band_v) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ if (fifo->ufo_in_band_v) {
nic->mac_control.stats_info->sw_stat.mem_freed
- += (config->tx_cfg[i].fifo_len * sizeof(u64));
- kfree(mac_control->fifos[i].ufo_in_band_v);
+ += (tx_cfg->fifo_len * sizeof(u64));
+ kfree(fifo->ufo_in_band_v);
}
}
for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
- val64 |=
- vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
- 13) | vBIT(config->tx_cfg[i].fifo_priority,
- ((j * 32) + 5), 3);
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
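+ /* pack this fifo's length (13 bits) and priority (3 bits) into its 32-bit slot */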
+ val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
+ vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
if (i == (config->tx_fifo_num - 1)) {
if (i % 2 == 0)
/* Rx DMA initialization. */
val64 = 0;
for (i = 0; i < config->rx_ring_num; i++) {
- val64 |=
- vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
- 3);
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+
+ val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
}
writeq(val64, &bar0->rx_queue_priority);
/* PRC Initialization and configuration */
for (i = 0; i < config->rx_ring_num; i++) {
- writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
+ struct ring_info *ring = &mac_control->rings[i];
+
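+ /* point the PRC at the first RxD block of this ring */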
+ writeq((u64) ring->rx_blocks[0].block_dma_addr,
&bar0->prc_rxd0_n[i]);
val64 = readq(&bar0->prc_ctrl_n[i]);
config = &nic->config;
for (i = 0; i < config->tx_fifo_num; i++) {
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+ struct fifo_info *fifo = &mac_control->fifos[i];
unsigned long flags;
- spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
- for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
- txdp = (struct TxD *) \
- mac_control->fifos[i].list_info[j].list_virt_addr;
+
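+ /* hold the fifo's tx_lock while walking and freeing its TxDLs */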
+ spin_lock_irqsave(&fifo->tx_lock, flags);
+ for (j = 0; j < tx_cfg->fifo_len; j++) {
+ txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
- skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
+ skb = s2io_txdl_getskb(fifo, txdp, j);
if (skb) {
nic->mac_control.stats_info->sw_stat.mem_freed
DBG_PRINT(INTR_DBG,
"%s:forcibly freeing %d skbs on FIFO%d\n",
dev->name, cnt, i);
- mac_control->fifos[i].tx_curr_get_info.offset = 0;
- mac_control->fifos[i].tx_curr_put_info.offset = 0;
- spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
+ fifo->tx_curr_get_info.offset = 0;
+ fifo->tx_curr_put_info.offset = 0;
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
}
}
config = &sp->config;
for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
for (blk = 0; blk < rx_ring_sz[i]; blk++)
free_rxd_blk(sp,i,blk);
- mac_control->rings[i].rx_curr_put_info.block_index = 0;
- mac_control->rings[i].rx_curr_get_info.block_index = 0;
- mac_control->rings[i].rx_curr_put_info.offset = 0;
- mac_control->rings[i].rx_curr_get_info.offset = 0;
- mac_control->rings[i].rx_bufs_left = 0;
+ ring->rx_curr_put_info.block_index = 0;
+ ring->rx_curr_get_info.block_index = 0;
+ ring->rx_curr_put_info.offset = 0;
+ ring->rx_curr_get_info.offset = 0;
+ ring->rx_bufs_left = 0;
DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
dev->name, buf_cnt, i);
}
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
- struct ring_info *ring;
struct config_param *config;
struct mac_info *mac_control;
int pkts_processed = 0;
return 0;
for (i = 0; i < config->rx_ring_num; i++) {
- ring = &mac_control->rings[i];
+ struct ring_info *ring = &mac_control->rings[i];
ring_pkts_processed = rx_intr_handler(ring, budget);
s2io_chk_rx_buffers(nic, ring);
pkts_processed += ring_pkts_processed;
tx_intr_handler(&mac_control->fifos[i]);
/* check for received packet and indicate up to network */
- for (i = 0; i < config->rx_ring_num; i++)
- rx_intr_handler(&mac_control->rings[i], 0);
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ rx_intr_handler(ring, 0);
+ }
for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
- -ENOMEM) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
break;
if (reason & GEN_INTR_RXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
- for (i = 0; i < config->rx_ring_num; i++)
- rx_intr_handler(&mac_control->rings[i], 0);
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ rx_intr_handler(ring, 0);
+ }
}
/*
* Reallocate the buffers from the interrupt handler itself.
*/
if (!config->napi) {
- for (i = 0; i < config->rx_ring_num; i++)
- s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ s2io_chk_rx_buffers(sp, ring);
+ }
}
writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
/* collect per-ring rx_packets and rx_bytes */
dev->stats.rx_packets = dev->stats.rx_bytes = 0;
for (i = 0; i < config->rx_ring_num; i++) {
- dev->stats.rx_packets += mac_control->rings[i].rx_packets;
- dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
+ struct ring_info *ring = &mac_control->rings[i];
+
+ dev->stats.rx_packets += ring->rx_packets;
+ dev->stats.rx_bytes += ring->rx_bytes;
}
return (&dev->stats);
size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
for (i = 0; i < config->rx_ring_num; i++) {
- blk_cnt = config->rx_cfg[i].num_rxd /
- (rxd_count[sp->rxd_mode] +1);
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
for (j = 0; j < blk_cnt; j++) {
for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
- rxdp = mac_control->rings[i].
- rx_blocks[j].rxds[k].virt_addr;
+ rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
if(sp->rxd_mode == RXD_MODE_3B)
- ba = &mac_control->rings[i].ba[j][k];
+ ba = &ring->ba[j][k];
if (set_rxd_buffer_pointer(sp, rxdp, ba,
&skb,(u64 *)&temp0_64,
(u64 *)&temp1_64,
config = &sp->config;
for (i = 0; i < config->rx_ring_num; i++) {
- mac_control->rings[i].mtu = dev->mtu;
- ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
+ struct ring_info *ring = &mac_control->rings[i];
+
+ ring->mtu = dev->mtu;
+ ret = fill_rx_buffers(sp, ring, 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
return -ENOMEM;
}
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
- mac_control->rings[i].rx_bufs_left);
+ ring->rx_bufs_left);
}
/* Initialise napi */
config->multiq = dev_multiq;
for (i = 0; i < config->tx_fifo_num; i++) {
- config->tx_cfg[i].fifo_len = tx_fifo_len[i];
- config->tx_cfg[i].fifo_priority = i;
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ tx_cfg->fifo_len = tx_fifo_len[i];
+ tx_cfg->fifo_priority = i;
}
/* mapping the QoS priority to the configured fifos */
config->tx_intr_type = TXD_INT_TYPE_UTILZ;
for (i = 0; i < config->tx_fifo_num; i++) {
- config->tx_cfg[i].f_no_snoop =
- (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
- if (config->tx_cfg[i].fifo_len < 65) {
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
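+ /* any fifo shorter than 65 TxDs forces per-list Tx interrupts */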
+ if (tx_cfg->fifo_len < 65) {
config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
break;
}
/* Rx side parameters. */
config->rx_ring_num = rx_ring_num;
for (i = 0; i < config->rx_ring_num; i++) {
- config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
- (rxd_count[sp->rxd_mode] + 1);
- config->rx_cfg[i].ring_priority = i;
- mac_control->rings[i].rx_bufs_left = 0;
- mac_control->rings[i].rxd_mode = sp->rxd_mode;
- mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
- mac_control->rings[i].pdev = sp->pdev;
- mac_control->rings[i].dev = sp->dev;
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
+ rx_cfg->ring_priority = i;
+ ring->rx_bufs_left = 0;
+ ring->rxd_mode = sp->rxd_mode;
+ ring->rxd_count = rxd_count[sp->rxd_mode];
+ ring->pdev = sp->pdev;
+ ring->dev = sp->dev;
}
for (i = 0; i < rx_ring_num; i++) {
- config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
- config->rx_cfg[i].f_no_snoop =
- (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+
+ rx_cfg->ring_org = RING_ORG_BUFF1;
+ rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
}
/* Setting Mac Control parameters */
}
if (config->intr_type == MSI_X) {
- for (i = 0; i < config->rx_ring_num ; i++)
- netif_napi_add(dev, &mac_control->rings[i].napi,
- s2io_poll_msix, 64);
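+ /* one napi instance per rx ring when using MSI-X */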
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
+ }
} else {
netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
}
sp->state = 0;
/* Initialize spinlocks */
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- spin_lock_init(&mac_control->fifos[i].tx_lock);
+ for (i = 0; i < sp->config.tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+
+ spin_lock_init(&fifo->tx_lock);
+ }
/*
* SXE-002: Configure link and activity LED to init state
break;
}
if (sp->config.multiq) {
- for (i = 0; i < sp->config.tx_fifo_num; i++)
- mac_control->fifos[i].multiq = config->multiq;
+ for (i = 0; i < sp->config.tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+
+ fifo->multiq = config->multiq;
+ }
DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
dev->name);
} else