Unnecessary casts of void * clutter the code.
These are the casts that remained after several earlier patches
removed unnecessary casts of netdev_priv() and dev_priv().
Done via the following Coccinelle script (and a little hand editing):
$ cat cast_void_pointer.cocci
@@
type T;
T *pt;
void *pv;
@@
- pt = (T *)pv;
+ pt = pv;
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Acked-by: Chris Snook <chris.snook@gmail.com>
Acked-by: Jon Mason <jdmason@kudzu.us>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: David Dillow <dave@thedillows.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
static irqreturn_t lance_interrupt (int irq, void *dev_id)
{
- struct net_device *dev;
- struct lance_private *lp;
- volatile struct lance_regs *ll;
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
int csr0;
- dev = (struct net_device *) dev_id;
-
- lp = netdev_priv(dev);
- ll = lp->ll;
-
ll->rap = LE_CSR0; /* LANCE Controller Status */
csr0 = ll->rdp;
int ret;
if(i) {
- qels[i].cbuf = (unsigned char *) cbuf;
+ qels[i].cbuf = cbuf;
qels[i].cbuflen = cbuflen;
- qels[i].dbuf = (unsigned char *) dbuf;
+ qels[i].dbuf = dbuf;
qels[i].dbuflen = dbuflen;
qels[i].QWrite = 1;
qels[i].mailbox = i; /* this should be initted rather */
int ret;
if(i) {
- qels[i].cbuf = (unsigned char *) cbuf;
+ qels[i].cbuf = cbuf;
qels[i].cbuflen = cbuflen;
- qels[i].dbuf = (unsigned char *) dbuf;
+ qels[i].dbuf = dbuf;
qels[i].dbuflen = dbuflen;
qels[i].QWrite = 0;
qels[i].mailbox = i; /* this should be initted rather */
/* Init TPD Ring */
tx_ring->dma = roundup(adapter->ring_dma, 8);
offset = tx_ring->dma - adapter->ring_dma;
- tx_ring->desc = (struct atl1e_tpd_desc *)
- (adapter->ring_vir_addr + offset);
+ tx_ring->desc = adapter->ring_vir_addr + offset;
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
/* Init CMB dma address */
tx_ring->cmb_dma = adapter->ring_dma + offset;
- tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
+ tx_ring->cmb = adapter->ring_vir_addr + offset;
offset += sizeof(u32);
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->txd_dma = adapter->ring_dma ;
offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
adapter->txd_dma += offset;
- adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr +
- offset);
+ adapter->txd_ring = adapter->ring_vir_addr + offset;
/* Init TXS Ring */
adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
status = be_mbox_notify_wait(adapter);
if (!status) {
- attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
- sizeof(struct be_cmd_resp_hdr));
+ attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
adapter->hba_port_num = attribs->hba_attribs.phy_port;
}
}
status = be_cmd_get_phy_info(adapter, &phy_cmd);
if (!status) {
- resp = (struct be_cmd_resp_get_phy_info *) phy_cmd.va;
+ resp = phy_cmd.va;
intf_type = le16_to_cpu(resp->interface_type);
switch (intf_type) {
status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
if (!status) {
- resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
+ resp = eeprom_cmd.va;
memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
}
dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
bfa_cee_hbfail(void *arg)
{
struct bfa_cee *cee;
- cee = (struct bfa_cee *) arg;
+ cee = arg;
if (cee->get_attr_pending == true) {
cee->get_attr_status = BFA_STATUS_FAILED;
bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
bfa_q_qe_init(*((struct list_head **) _qe)); \
} else { \
- *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+ *((struct list_head **)(_qe)) = NULL; \
} \
}
u32 avail_emptybuff = 0;
unsigned long flags = 0;
- pshm_drv = (struct shmdrv_layer *)priv;
+ pshm_drv = priv;
/* Check for received buffers. */
if (mbx_msg & SHM_FULL_MASK) {
val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
cnic_ctx_wr(dev, cid_addr, offset1, val);
- txbd = (struct tx_bd *) udev->l2_ring;
+ txbd = udev->l2_ring;
buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
- rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
+ rxbd = udev->l2_ring + BCM_PAGE_SIZE;
for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
while (td->tid_release_list) {
struct t3c_tid_entry *p = td->tid_release_list;
- td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
+ td->tid_release_list = p->ctx;
spin_unlock_bh(&td->tid_release_lock);
skb = alloc_skb(sizeof(struct cpl_tid_release),
} else {
pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
GFP_KERNEL);
- pool->iomap = (void __force __iomem *)pool->cpumap;
+ pool->iomap = pool->cpumap;
pool->hw_addr = pool->phys;
}
*/
static void cp_to_buf(const int type, void *to, const void *from, int len)
{
- unsigned short *tp, *fp, clen;
- unsigned char *rtp, *rfp;
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
if (type == PMAD_LANCE) {
memcpy(to, from, len);
} else if (type == PMAX_LANCE) {
clen = len >> 1;
- tp = (unsigned short *) to;
- fp = (unsigned short *) from;
+ tp = to;
+ fp = from;
while (clen--) {
*tp++ = *fp++;
}
clen = len & 1;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
}
* copy 16 Byte chunks
*/
clen = len >> 4;
- tp = (unsigned short *) to;
- fp = (unsigned short *) from;
+ tp = to;
+ fp = from;
while (clen--) {
*tp++ = *fp++;
*tp++ = *fp++;
static void cp_from_buf(const int type, void *to, const void *from, int len)
{
- unsigned short *tp, *fp, clen;
- unsigned char *rtp, *rfp;
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
if (type == PMAD_LANCE) {
memcpy(to, from, len);
} else if (type == PMAX_LANCE) {
clen = len >> 1;
- tp = (unsigned short *) to;
- fp = (unsigned short *) from;
+ tp = to;
+ fp = from;
while (clen--) {
*tp++ = *fp++;
fp++;
clen = len & 1;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
while (clen--) {
*rtp++ = *rfp++;
* copy 16 Byte chunks
*/
clen = len >> 4;
- tp = (unsigned short *) to;
- fp = (unsigned short *) from;
+ tp = to;
+ fp = from;
while (clen--) {
*tp++ = *fp++;
*tp++ = *fp++;
/* Tx & Rx descriptors (aligned to a quadword boundary) */
offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
- lp->rx_ring = (struct depca_rx_desc __iomem *) (lp->sh_mem + offset);
+ lp->rx_ring = lp->sh_mem + offset;
lp->rx_ring_offset = offset;
offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
- lp->tx_ring = (struct depca_tx_desc __iomem *) (lp->sh_mem + offset);
+ lp->tx_ring = lp->sh_mem + offset;
lp->tx_ring_offset = offset;
offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_iounmap;
- np->tx_ring = (struct netdev_desc *) ring_space;
+ np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- np->rx_ring = (struct netdev_desc *) ring_space;
+ np->rx_ring = ring_space;
np->rx_ring_dma = ring_dma;
/* Parse eeprom data */
unsigned long flags;
spin_lock_irqsave(&eq->spinlock, flags);
- eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+ eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
spin_unlock_irqrestore(&eq->spinlock, flags);
return eqe;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_iounmap;
- ep->tx_ring = (struct epic_tx_desc *)ring_space;
+ ep->tx_ring = ring_space;
ep->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- ep->rx_ring = (struct epic_rx_desc *)ring_space;
+ ep->rx_ring = ring_space;
ep->rx_ring_dma = ring_dma;
if (dev->mem_start) {
err = -ENOMEM;
goto err_out_free_dev;
}
- np->rx_ring = (struct fealnx_desc *)ring_space;
+ np->rx_ring = ring_space;
np->rx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
err = -ENOMEM;
goto err_out_free_rx;
}
- np->tx_ring = (struct fealnx_desc *)ring_space;
+ np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
/* find the connected MII xcvrs */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
- tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
+ tx_queue->tx_bd_base = vaddr;
tx_queue->tx_bd_dma_base = addr;
tx_queue->dev = ndev;
/* enet DMA only understands physical addresses */
/* Start the rx descriptor ring where the tx ring leaves off */
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
+ rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
rx_queue->dev = ndev;
addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_cleardev;
- hmp->tx_ring = (struct hamachi_desc *)ring_space;
+ hmp->tx_ring = ring_space;
hmp->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- hmp->rx_ring = (struct hamachi_desc *)ring_space;
+ hmp->rx_ring = ring_space;
hmp->rx_ring_dma = ring_dma;
/* Check for options being passed in */
SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = (u32)MACE_BASE;
- mp->mace = (volatile struct mace *) MACE_BASE;
+ mp->mace = MACE_BASE;
dev->irq = IRQ_MAC_MACE;
mp->dma_intr = IRQ_MAC_MACE_DMA;
priv->rx_ring[0].cqn, &context);
ptr = ((void *) &context) + 0x3c;
- rss_context = (struct mlx4_en_rss_context *) ptr;
+ rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
} else {
if (!tx_info->inl) {
if ((void *) data >= end) {
- data = (struct mlx4_wqe_data_seg *)
- (ring->buf + ((void *) data - end));
+ data = ring->buf + ((void *)data - end);
}
if (tx_info->linear) {
for (i = 0; i < frags; i++) {
/* Check for wraparound before unmapping */
if ((void *) data >= end)
- data = (struct mlx4_wqe_data_seg *) ring->buf;
+ data = ring->buf;
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
rq_size, &hostrq_phys_addr);
if (addr == NULL)
return -ENOMEM;
- prq = (nx_hostrq_rx_ctx_t *)addr;
+ prq = addr;
addr = pci_alloc_consistent(adapter->pdev,
rsp_size, &cardrsp_phys_addr);
err = -ENOMEM;
goto out_free_rq;
}
- prsp = (nx_cardrsp_rx_ctx_t *)addr;
+ prsp = addr;
prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
}
memset(rq_addr, 0, rq_size);
- prq = (nx_hostrq_tx_ctx_t *)rq_addr;
+ prq = rq_addr;
memset(rsp_addr, 0, rsp_size);
- prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;
+ prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
}
memset(addr, 0, sizeof(struct netxen_ring_ctx));
- recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
+ recv_ctx->hwctx = addr;
recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
recv_ctx->hwctx->cmd_consumer_offset =
cpu_to_le64(recv_ctx->phys_addr +
goto err_out_free;
}
- tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+ tx_ring->desc_head = addr;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
err = -ENOMEM;
goto err_out_free;
}
- rds_ring->desc_head = (struct rcv_desc *)addr;
+ rds_ring->desc_head = addr;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
rds_ring->crb_rcv_producer =
err = -ENOMEM;
goto err_out_free;
}
- sds_ring->desc_head = (struct status_desc *)addr;
+ sds_ring->desc_head = addr;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
sds_ring->crb_sts_consumer =
* Pick the appropriate table, start scanning for free/reusable
* entries at the index obtained by hashing the specified MAC address
*/
- start = (struct addr_table_entry *)(pep->htpr);
+ start = pep->htpr;
entry = start + hash_function(mac_addr);
for (i = 0; i < HOP_NUMBER; i++) {
if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
PAGE_SIZE, &qdev->shadow_reg_phy_addr);
if (qdev->shadow_reg_virt_addr != NULL) {
- qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
+ qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
qdev->req_consumer_index_phy_addr_high =
MS_64BITS(qdev->shadow_reg_phy_addr);
qdev->req_consumer_index_phy_addr_low =
qdev->small_buf_release_cnt = 8;
qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
qdev->lrg_buf_release_cnt = 8;
- qdev->lrg_buf_next_free =
- (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
+ qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
qdev->small_buf_index = 0;
qdev->lrg_buf_index = 0;
qdev->lrg_buf_free_count = 0;
err = -EIO;
goto error;
}
- tmp_tmpl = (struct qlcnic_dump_template_hdr *) tmp_addr;
+ tmp_tmpl = tmp_addr;
csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
if (csum) {
dev_err(&adapter->pdev->dev,
err = -EIO;
goto error;
}
- tmp_buf = (u32 *) tmp_addr;
+ tmp_buf = tmp_addr;
template = (u32 *) ahw->fw_dump.tmpl_hdr;
for (i = 0; i < temp_size/sizeof(u32); i++)
*template++ = __le32_to_cpu(*tmp_buf++);
&hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
- prq = (struct qlcnic_hostrq_rx_ctx *)addr;
+ prq = addr;
addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
&cardrsp_phys_addr, GFP_KERNEL);
err = -ENOMEM;
goto out_free_rq;
}
- prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
+ prsp = addr;
prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
}
memset(rq_addr, 0, rq_size);
- prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
+ prq = rq_addr;
memset(rsp_addr, 0, rsp_size);
- prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
+ prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
goto err_out_free;
}
- tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+ tx_ring->desc_head = addr;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
err = -ENOMEM;
goto err_out_free;
}
- rds_ring->desc_head = (struct rcv_desc *)addr;
+ rds_ring->desc_head = addr;
}
err = -ENOMEM;
goto err_out_free;
}
- sds_ring->desc_head = (struct status_desc *)addr;
+ sds_ring->desc_head = addr;
}
return 0;
return -ENOMEM;
memset(nic_info_addr, 0, nic_size);
- nic_info = (struct qlcnic_info *) nic_info_addr;
+ nic_info = nic_info_addr;
err = qlcnic_issue_cmd(adapter,
adapter->ahw->pci_func,
adapter->fw_hal_version,
return -ENOMEM;
memset(nic_info_addr, 0, nic_size);
- nic_info = (struct qlcnic_info *)nic_info_addr;
+ nic_info = nic_info_addr;
nic_info->pci_func = cpu_to_le16(nic->pci_func);
nic_info->op_mode = cpu_to_le16(nic->op_mode);
return -ENOMEM;
memset(pci_info_addr, 0, pci_size);
- npar = (struct qlcnic_pci_info *) pci_info_addr;
+ npar = pci_info_addr;
err = qlcnic_issue_cmd(adapter,
adapter->ahw->pci_func,
adapter->fw_hal_version,
QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
if (!err) {
- stats = (struct __qlcnic_esw_statistics *)stats_addr;
+ stats = stats_addr;
esw_stats->context_id = le16_to_cpu(stats->context_id);
esw_stats->version = le16_to_cpu(stats->version);
esw_stats->size = le16_to_cpu(stats->size);
/* Copy template header first */
copy_sz = fw_dump->tmpl_hdr->size;
hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
- data = (u32 *) buffer;
+ data = buffer;
for (i = 0; i < copy_sz/sizeof(u32); i++)
*data++ = cpu_to_le32(*hdr_ptr++);
tmpl_hdr->sys_info[1] = adapter->fw_version;
for (i = 0; i < no_entries; i++) {
- entry = (struct qlcnic_dump_entry *) ((void *) tmpl_hdr +
- entry_offset);
+ entry = (void *)tmpl_hdr + entry_offset;
if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
entry_offset += entry->hdr.offset;
return;
adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
- adapter->fhash.fhead = (struct hlist_head *)head;
+ adapter->fhash.fhead = head;
for (i = 0; i < adapter->fhash.fmax; i++)
INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
tmp = (u64)rx_ring->lbq_base_dma;
- base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
+ base_indirect_ptr = rx_ring->lbq_base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
tmp = (u64)rx_ring->sbq_base_dma;
- base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
+ base_indirect_ptr = rx_ring->sbq_base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
- pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
+ pre_rxd_blk = tmp_v_addr;
pre_rxd_blk->reserved_2_pNext_RxD_block =
(unsigned long)tmp_v_addr_next;
pre_rxd_blk->pNext_RxD_Blk_physical =
mac_control->stats_mem_sz = size;
tmp_v_addr = mac_control->stats_mem;
- mac_control->stats_info = (struct stat_block *)tmp_v_addr;
+ mac_control->stats_info = tmp_v_addr;
memset(tmp_v_addr, 0, size);
DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
spin_lock_irqsave(&fifo->tx_lock, flags);
for (j = 0; j < tx_cfg->fifo_len; j++) {
- txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
+ txdp = fifo->list_info[j].list_virt_addr;
skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
if (skb) {
swstats->mem_freed += skb->truesize;
get_info = fifo_data->tx_curr_get_info;
memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
- txdlp = (struct TxD *)
- fifo_data->list_info[get_info.offset].list_virt_addr;
+ txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
(get_info.offset != put_info.offset) &&
(txdlp->Host_Control)) {
get_info.offset++;
if (get_info.offset == get_info.fifo_len + 1)
get_info.offset = 0;
- txdlp = (struct TxD *)
- fifo_data->list_info[get_info.offset].list_virt_addr;
+ txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
fifo_data->tx_curr_get_info.offset = get_info.offset;
}
put_off = (u16)fifo->tx_curr_put_info.offset;
get_off = (u16)fifo->tx_curr_get_info.offset;
- txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;
+ txdp = fifo->list_info[put_off].list_virt_addr;
queue_len = fifo->tx_curr_put_info.fifo_len + 1;
/* Avoid "put" pointer going beyond "get" pointer */
/* Initializing the BAR1 address as the start of the FIFO pointer. */
for (j = 0; j < MAX_TX_FIFOS; j++) {
- mac_control->tx_FIFO_start[j] =
- (struct TxFIFO_element __iomem *)
- (sp->bar1 + (j * 0x00020000));
+ mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
}
/* Driver entry points */
u64 generation_end;
mac_stats = &efx->mac_stats;
- dma_stats = (u64 *)efx->stats_buffer.addr;
+ dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
if (generation_end == STATS_GENERATION_INVALID)
ret = -ENOMEM;
goto err_out_cleardev;
}
- sis_priv->tx_ring = (BufferDesc *)ring_space;
+ sis_priv->tx_ring = ring_space;
sis_priv->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma);
ret = -ENOMEM;
goto err_unmap_tx;
}
- sis_priv->rx_ring = (BufferDesc *)ring_space;
+ sis_priv->rx_ring = ring_space;
sis_priv->rx_ring_dma = ring_dma;
/* The SiS900-specific entries in the device structure. */
return IRQ_NONE;
}
- dev = (struct net_device *)dev_id;
+ dev = dev_id;
/* Make sure its really us. -- the Madge way */
pending = inb(dev->base_addr + MC_CONTROL_REG0);
dev->irq = pdev->irq;
tp = netdev_priv(dev);
- tp->shared = (struct typhoon_shared *) shared;
+ tp->shared = shared;
tp->shared_dma = shared_dma;
tp->pdev = pdev;
tp->tx_pdev = pdev;
goto exit;
val64 = readq(&legacy_reg->toc_first_pointer);
- toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
+ toc = bar0 + val64;
exit:
return toc;
}
u32 i;
enum vxge_hw_status status = VXGE_HW_OK;
- hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
+ hldev->legacy_reg = hldev->bar0;
hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
if (hldev->toc_reg == NULL) {
}
val64 = readq(&hldev->toc_reg->toc_common_pointer);
- hldev->common_reg =
- (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
+ hldev->common_reg = hldev->bar0 + val64;
val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
- hldev->mrpcim_reg =
- (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
+ hldev->mrpcim_reg = hldev->bar0 + val64;
for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
- hldev->srpcim_reg[i] =
- (struct vxge_hw_srpcim_reg __iomem *)
- (hldev->bar0 + val64);
+ hldev->srpcim_reg[i] = hldev->bar0 + val64;
}
for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
- hldev->vpmgmt_reg[i] =
- (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
+ hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
}
for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
- hldev->vpath_reg[i] =
- (struct vxge_hw_vpath_reg __iomem *)
- (hldev->bar0 + val64);
+ hldev->vpath_reg[i] = hldev->bar0 + val64;
}
val64 = readq(&hldev->toc_reg->toc_kdfc);
switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
case 0:
- hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
- VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
+ hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
break;
default:
break;
}
val64 = readq(&toc->toc_common_pointer);
- common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
+ common_reg = bar0 + val64;
status = __vxge_hw_device_vpath_reset_in_prog_check(
(u64 __iomem *)&common_reg->vpath_rst_in_prog);
val64 = readq(&toc->toc_vpmgmt_pointer[i]);
- vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
- (bar0 + val64);
+ vpmgmt_reg = bar0 + val64;
hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
if (__vxge_hw_device_access_rights_get(hw_info->host_type,
val64 = readq(&toc->toc_mrpcim_pointer);
- mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
- (bar0 + val64);
+ mrpcim_reg = bar0 + val64;
writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
wmb();
val64 = readq(&toc->toc_vpath_pointer[i]);
spin_lock_init(&vpath.lock);
- vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
- (bar0 + val64);
+ vpath.vp_reg = bar0 + val64;
vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
continue;
val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
- (bar0 + val64);
+ vpath.vp_reg = bar0 + val64;
vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
status = __vxge_hw_vpath_addr_get(&vpath,
memblock_index, item,
&memblock_item_idx);
- rxdp = (struct vxge_hw_ring_rxd_1 *)
- ring->channel.reserve_arr[reserve_index];
+ rxdp = ring->channel.reserve_arr[reserve_index];
uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
goto vpath_open_exit8;
}
- vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
- stats_block->memblock;
+ vpath->hw_stats = vpath->stats_block->memblock;
memset(vpath->hw_stats, 0,
sizeof(struct vxge_hw_vpath_stats_hw_info));
vxge_hw_channel_dtr_try_complete(channel, rxdh);
- rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
+ rxdp = *rxdh;
if (rxdp == NULL) {
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
goto exit;
channel = &fifo->channel;
txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
+ txdp_first = txdlh;
txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
txdp_last->control_0 |=
vxge_hw_channel_dtr_try_complete(channel, txdlh);
- txdp = (struct vxge_hw_fifo_txd *)*txdlh;
+ txdp = *txdlh;
if (txdp == NULL) {
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
goto exit;
dsr_rx = cpc_readb(card->hw.scabase + DSR_RX(ch));
- cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty;
+ cpc_tty = pc300dev->cpc_tty;
while (1) {
rx_len = 0;
u16 *from_u16_ptr, *to_u16_ptr;
to_u32_ptr = dest_ptr;
- from_u16_ptr = (u16 *) src_ptr;
+ from_u16_ptr = src_ptr;
align_buffer = 0;
for (; length > 3; length -= 4) {
- to_u16_ptr = (u16 *) ((void *) &align_buffer);
+ to_u16_ptr = (u16 *)&align_buffer;
*to_u16_ptr++ = *from_u16_ptr++;
*to_u16_ptr++ = *from_u16_ptr++;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_cleardev;
- np->tx_ring = (struct yellowfin_desc *)ring_space;
+ np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- np->rx_ring = (struct yellowfin_desc *)ring_space;
+ np->rx_ring = ring_space;
np->rx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_rx;
- np->tx_status = (struct tx_status_words *)ring_space;
+ np->tx_status = ring_space;
np->tx_status_dma = ring_dma;
if (dev->mem_start)