buf[count-1] = inb(NE_BASE + NE_DATAPORT);
}
} else {
- ptrc = (char*)buf;
+ ptrc = buf;
for (cnt = 0; cnt < count; cnt++)
*ptrc++ = inb(NE_BASE + NE_DATAPORT);
}
struct greth_regs *regs;
greth = netdev_priv(dev);
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
{
struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
u32 mc_filter[2];
unsigned int bitnr;
{
int cfg;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
cfg = GRETH_REGLOAD(regs->control);
if (dev->flags & IFF_PROMISC)
goto error1;
}
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
greth->irq = ofdev->archdata.irqs[0];
dev_set_drvdata(greth->dev, dev);
skb_put(skb, len); /* make room */
cp_from_buf(lp->type, skb->data,
- (char *)lp->rx_buf_ptr_cpu[entry], len);
+ lp->rx_buf_ptr_cpu[entry], len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
- cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
+ cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
/* Now, give the packet to the lance */
*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
* bits are reversed.
*/
- addr = (void *)MACE_PROM;
+ addr = MACE_PROM;
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
int atl1c_phy_init(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
/* select one link mode to get lower power consumption */
int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret = 0;
u16 autoneg_advertised = ADVERTISED_10baseT_Half;
int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 master_ctrl, mac_ctrl, phy_ctrl;
u32 wol_ctrl, speed;
}
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
- tpd_ring[i].buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ tpd_ring[i].buffer_info = tpd_ring->buffer_info + count;
count += tpd_ring[i].count;
}
- rfd_ring->buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ rfd_ring->buffer_info = tpd_ring->buffer_info + count;
count += rfd_ring->count;
rx_desc_count += rfd_ring->count;
*/
static int atl1c_reset_mac(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 ctrl_data = 0;
static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
- struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
- &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
if (eeprom_buff == NULL)
return -ENOMEM;
- ptr = (u32 *)eeprom_buff;
+ ptr = eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
*/
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
struct pci_dev *pdev = adapter->pdev;
u16 index, ring_count;
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
u16 i, j;
return err;
}
-static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
+static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
- struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
- struct atl1e_tx_ring *tx_ring =
- (struct atl1e_tx_ring *)&adapter->tx_ring;
+ struct atl1e_hw *hw = &adapter->hw;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_rx_page_desc *rx_page_desc = NULL;
int i, j;
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 dev_ctrl_data = 0;
u32 max_pay_load = 0;
u32 jumbo_thresh = 0;
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 rxf_len = 0;
u32 rxf_low = 0;
u32 rxf_high = 0;
static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
u8 rx_using = rx_page_desc[que].rx_using;
- return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]);
+ return &rx_page_desc[que].rx_page[rx_using];
}
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
struct net_device *netdev = adapter->netdev;
- struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
- &adapter->rx_ring;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc =
(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
struct sk_buff *skb = NULL;
tx_ring->next_to_use = 0;
memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
- return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
+ return &tx_ring->desc[next_to_use];
}
static struct atl1e_tx_buffer *
if (wufc) {
/* get link status */
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
mii_advertise_data = ADVERTISE_10HALF;
for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
msleep(100);
- atl1e_read_phy_reg(hw, MII_BMSR,
- (u16 *)&mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
if (mii_bmsr_data & BMSR_LSTATUS)
break;
}
goto err_nomem;
}
- rfd_ring->buffer_info =
- (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+ rfd_ring->buffer_info = tpd_ring->buffer_info + tpd_ring->count;
/*
* real ring DMA buffer
bnapi = &bp->bnx2_napi[i];
- sblk = (void *) (status_blk +
- BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i;
bnapi->status_blk.msix = sblk;
bnapi->hw_tx_cons_ptr =
&sblk->status_tx_quick_consumer_index;
return;
}
- cqes[0] = (struct kcqe *) &kcqe;
+ cqes[0] = &kcqe;
cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}
cp->kcq1.sw_prod_idx = 0;
- cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &sblk->status_completion_producer_index;
+ cp->kcq1.hw_prod_idx_ptr = &sblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
+ cp->kcq1.status_idx_ptr = &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
- cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &msblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
- cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
+ cp->kcq1.hw_prod_idx_ptr = &msblk->status_completion_producer_index;
+ cp->kcq1.status_idx_ptr = &msblk->status_idx;
+ cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
{
switch (asic_gen) {
case BFI_ASIC_GEN_CT:
- return (u32 *)(bfi_image_ct_cna + off);
+ return bfi_image_ct_cna + off;
break;
case BFI_ASIC_GEN_CT2:
- return (u32 *)(bfi_image_ct2_cna + off);
+ return bfi_image_ct2_cna + off;
break;
default:
return NULL;
if (!skb) {
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
- td->tid_release_list = (struct t3c_tid_entry *)p;
+ td->tid_release_list = p;
break;
}
mk_tid_release(skb, p - td->tid_maps.tid_tab);
end = (void *)q->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
* restart a TX Ethernet Queue which was stopped for lack of
* free TX Queue Descriptors ...
*/
- const struct cpl_sge_egr_update *p = (void *)cpl;
+ const struct cpl_sge_egr_update *p = cpl;
unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
struct sge *s = &adapter->sge;
struct sge_txq *tq;
end = (void *)tq->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
*/
if (unlikely((void *)sgl == (void *)tq->stat)) {
sgl = (void *)tq->desc;
- end = (void *)((void *)tq->desc +
- ((void *)end - (void *)tq->stat));
+ end = (void *)tq->desc + ((void *)end - (void *)tq->stat);
}
write_sgl(skb, tq, sgl, end, 0, addr);
tmp = srom_rd(aprom_addr, i);
*p++ = cpu_to_le16(tmp);
}
- de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ de4x5_dbg_srom(&lp->srom);
}
}
for (; (u32) i < (u32) addr + size4Aling; i += 4)
printk("%08x ", *((u32 *) (i)));
for (; (u32) i < (u32) addr + size; i++)
- printk("%02x", *((u8 *) (i)));
+ printk("%02x", *((i)));
if (notAlign == 1)
printk("\r\n");
}
ringptr->pdl = pdlptr + 1;
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
/*
* Write address and length of first PDL Fragment (which is used for
ringptr->pdl = pdlptr; /* +1; */
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
return roundup(MAX_TX_FRAG * 2 + 2, 4);
}
/* Conversion to new PCI API : NOP */
pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->txrhead->skb);
- lp->txrhead->skb = (void *) NULL;
+ lp->txrhead->skb = NULL;
lp->txrhead = lp->txrhead->next;
lp->txrcommit--;
}
memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
lp->set_add.command = CmdIASetup;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
+ i596_add_cmd(dev, &lp->set_add);
lp->tdr.command = CmdTDR;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
+ i596_add_cmd(dev, &lp->tdr);
if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
return 1;
lp = netdev_priv(dev);
while (lp->cmd_head) {
- cmd = (struct i596_cmd *)lp->cmd_head;
+ cmd = lp->cmd_head;
lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
lp->cmd_backlog--;
lp->i596_config[8] |= 0x01;
}
- i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
+ i596_add_cmd(dev, &lp->set_conf);
}
}
}
#endif
- ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+ ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */
/*
* alloc xmit-buffs / init xmit_cmds
ptr = (char *) ptr + XMIT_BUFF_SIZE;
p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
ptr = (char *) ptr + sizeof(struct tbd_struct);
- if((void *)ptr > (void *)dev->mem_end)
+ if(ptr > (void *)dev->mem_end)
{
printk("%s: not enough shared-mem for your configuration!\n",dev->name);
return 1;
goto out_free;
}
- rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+ rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
int nexti;
txq->tx_desc_area_size = size;
- tx_desc = (struct tx_desc *)txq->tx_desc_area;
+ tx_desc = txq->tx_desc_area;
for (i = 0; i < txq->tx_ring_size; i++) {
struct tx_desc *txd = tx_desc + i;
int nexti;
}
memset((void *)pep->p_rx_desc_area, 0, size);
/* initialize the next_desc_ptr links in the Rx descriptors ring */
- p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
}
memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
- p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
r->com.to_state = state;
r->com.state = RES_QP_BUSY;
if (qp)
- *qp = (struct res_qp *)r;
+ *qp = r;
}
}
r->com.to_state = state;
r->com.state = RES_MPT_BUSY;
if (mpt)
- *mpt = (struct res_mpt *)r;
+ *mpt = r;
}
}
if (sp->rxd_mode == RXD_MODE_3B)
ba = &ring->ba[j][k];
if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
- (u64 *)&temp0_64,
- (u64 *)&temp1_64,
- (u64 *)&temp2_64,
+ &temp0_64,
+ &temp1_64,
+ &temp2_64,
size) == -ENOMEM) {
return 0;
}
int i, ret = 0;
struct config_param *config;
struct mac_info *mac_control;
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
u16 interruptible;
/* Initialize the H/W I/O registers */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
struct s2io_nic *sp = ring_data->nic;
- struct net_device *dev = (struct net_device *)ring_data->dev;
+ struct net_device *dev = ring_data->dev;
struct sk_buff *skb = (struct sk_buff *)
((unsigned long)rxdp->Host_Control);
int ring_no = ring_data->ring_no;
static void s2io_link(struct s2io_nic *sp, int link)
{
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (link != sp->last_link_state) {
return -1;
}
- *ip = (struct iphdr *)((u8 *)buffer + ip_off);
+ *ip = (struct iphdr *)(buffer + ip_off);
ip_len = (u8)((*ip)->ihl);
ip_len <<= 2;
*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
for (i = 0; i < nreq; i++)
vxge_os_dma_malloc_async(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ blockpool->hldev->pdev,
blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
break;
pci_unmap_single(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ blockpool->hldev->pdev,
((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
((struct __vxge_hw_blockpool_entry *)p)->length,
PCI_DMA_BIDIRECTIONAL);
vxge_os_dma_free(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ blockpool->hldev->pdev,
((struct __vxge_hw_blockpool_entry *)p)->memblock,
&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
- vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
+ vpath = &hldev->virtual_paths[vp_id];
if (vpath->ringh) {
status = __vxge_hw_ring_reset(vpath->ringh);
/* misaligned, free current one and try allocating
* size + VXGE_CACHE_LINE_SIZE memory
*/
- kfree((void *) vaddr);
+ kfree(vaddr);
size += VXGE_CACHE_LINE_SIZE;
realloc_flag = 1;
goto realloc;
"%s:%d", __func__, __LINE__);
vdev = netdev_priv(dev);
- hldev = (struct __vxge_hw_device *)vdev->devh;
+ hldev = vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
return;
continue;
vxge_debug_ll_config(VXGE_TRACE,
"%s: MTU size - %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ vdev->devh->
config.vp_config[i].mtu);
vxge_debug_init(VXGE_TRACE,
"%s: VLAN tag stripping %s", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ vdev->devh->
config.vp_config[i].rpa_strip_vlan_tag
? "Enabled" : "Disabled");
vxge_debug_ll_config(VXGE_TRACE,
"%s: Max frags : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ vdev->devh->
config.vp_config[i].fifo.max_frags);
break;
}
/* notify driver */
if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(
- (struct __vxge_hw_device *)hldev,
- type, vp_id);
+ hldev->uld_callbacks->crit_err(hldev, type, vp_id);
out:
/* check whether it is not the end */
if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
- vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
- 0);
+ vxge_assert(rxdp->host_control != 0);
++ring->cmpl_cnt;
unsigned long *rxr;
u32 w0, err;
- rxr = (unsigned long *) ip->rxr; /* Ring base */
+ rxr = ip->rxr; /* Ring base */
rx_entry = ip->rx_ci; /* RX consume index */
n_entry = ip->rx_pi;
if (ip->rxr == NULL) {
/* Allocate and initialize rx ring. 4kb = 512 entries */
ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
- rxr = (unsigned long *) ip->rxr;
+ rxr = ip->rxr;
if (!rxr)
printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
goto out_free_io_4;
/* descriptors are aligned due to the nature of pci_alloc_consistent */
- pd->tx_ring = (struct smsc9420_dma_desc *)
- (pd->rx_ring + RX_RING_SIZE);
+ pd->tx_ring = pd->rx_ring + RX_RING_SIZE;
pd->tx_dma_addr = pd->rx_dma_addr +
sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
platform_set_drvdata(pdev, NULL);
- iounmap((void *)priv->ioaddr);
+ iounmap((void __force __iomem *)priv->ioaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
- skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
- len);
+ skb_copy_to_linear_data(skb, this_qbuf, len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
- velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
+ velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
opts->numrx = (opts->numrx & ~3);
}
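
A minimal standalone sketch of the C rule every hunk above relies on; it is not taken from any of the drivers in this patch, and struct ring_desc and the sizes are made up for illustration. Conversions between void * and any object-pointer type are implicit in C (C99 6.3.2.3), in both directions, so casts on such assignments, like the ones removed here, are pure noise (they are required in C++, which is where the habit usually comes from).

#include <stdio.h>
#include <stdlib.h>

struct ring_desc {              /* hypothetical descriptor type */
	unsigned int addr;
	unsigned int len;
};

int main(void)
{
	/* stand-in for a kmalloc()/pci_alloc_consistent() call: returns void * */
	void *area = calloc(16, sizeof(struct ring_desc));
	if (!area)
		return 1;

	/* no (struct ring_desc *) cast needed: void * converts implicitly */
	struct ring_desc *ring = area;

	/* the reverse direction is implicit as well */
	void *opaque = ring;

	/* a cast to the pointer's own type, e.g. (struct ring_desc *)ring,
	 * is equally redundant */
	printf("ring=%p opaque=%p\n", (void *)ring, opaque);

	free(area);
	return 0;
}

Casts that change the pointee type for a differently-sized access (such as the retained (u32 *) in the hex-dump hunk) or that add sparse address-space annotations (the __force __iomem case) are not useless and stay.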