u16 ntxd; /* # tx descriptors tunable */
u16 txin; /* index of next descriptor to reclaim */
u16 txout; /* index of next descriptor to post */
- void **txp; /* pointer to parallel array of pointers to packets */
+ /* pointer to parallel array of pointers to packets */
+ struct sk_buff **txp;
struct dma_seg_map *txp_dmah; /* DMA MAP meta-data handle */
/* Aligned physical address of descriptor ring */
unsigned long txdpa;
u16 nrxd; /* # rx descriptors tunable */
u16 rxin; /* index of next descriptor to reclaim */
u16 rxout; /* index of next descriptor to post */
- void **rxp; /* pointer to parallel array of pointers to packets */
+ /* pointer to parallel array of pointers to packets */
+ struct sk_buff **rxp;
struct dma_seg_map *rxp_dmah; /* DMA MAP meta-data handle */
/* Aligned physical address of descriptor ring */
unsigned long rxdpa;
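
The txp/rxp fields above are parallel arrays: slot i of the descriptor ring owns the packet stored at index i, and txin/txout (rxin/rxout) are the reclaim/post indices that chase each other around the ring. A minimal standalone sketch of that bookkeeping follows; the names ring_post()/ring_reclaim() and the size NTXD are invented for illustration and are not driver code.

/*
 * Illustrative userspace model of the parallel-array bookkeeping above;
 * nothing here is taken from the driver.
 */
#include <stdio.h>
#include <stdint.h>

#define NTXD 8                      /* stands in for the ntxd tunable */

static void *txp[NTXD];             /* parallel array: slot -> posted packet */
static uint16_t txin;               /* next slot to reclaim */
static uint16_t txout;              /* next slot to post */

static int ring_post(void *pkt)
{
	uint16_t next = (uint16_t)((txout + 1) % NTXD);

	if (next == txin)           /* ring full: one slot stays unused */
		return -1;
	txp[txout] = pkt;           /* the real code also fills descriptor txout */
	txout = next;
	return 0;
}

static void *ring_reclaim(void)
{
	void *pkt;

	if (txin == txout)          /* ring empty */
		return NULL;
	pkt = txp[txin];
	txp[txin] = NULL;
	txin = (uint16_t)((txin + 1) % NTXD);
	return pkt;
}

int main(void)
{
	int a, b;

	ring_post(&a);
	ring_post(&b);
	printf("reclaim order: %p %p\n", ring_reclaim(), ring_reclaim());
	return 0;
}
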
static void _dma_ddtable_init(struct dma_info *di, uint direction,
unsigned long pa);
static void _dma_rxenable(struct dma_info *di);
-static void *_dma_getnextrxp(struct dma_info *di, bool forceall);
+static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall);
static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
/* Prototypes for 64-bit routines */
static bool dma64_alloc(struct dma_info *di, uint direction);
-static void *dma64_getnextrxp(struct dma_info *di, bool forceall);
+static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall);
static bool dma64_rxidle(struct dma_info *di);
static bool _dma64_addrext(struct dma64regs *dma64regs);
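
The prototype changes above replace untyped void * returns with struct sk_buff *. The point is caller-side type safety: a void * converts silently to any object pointer, so a mistaken assignment compiles, while a struct-typed return draws a diagnostic. A standalone toy illustrating that; struct frame stands in for struct sk_buff, and get_untyped()/get_typed() are invented names, not driver functions.

#include <stdio.h>

struct frame { unsigned int len; };

static struct frame one = { .len = 42 };

static void *get_untyped(void)        /* old shape: void *dma_rx(...) */
{
	return &one;
}

static struct frame *get_typed(void)  /* new shape: struct sk_buff *dma_rx(...) */
{
	return &one;
}

int main(void)
{
	int *oops = get_untyped();     /* accepted silently: void * converts to anything */
	struct frame *f = get_typed(); /* int *bad = get_typed(); would not compile cleanly */

	printf("%p %u\n", (void *)oops, f->len);
	return 0;
}
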
di->rxin = di->rxout = 0;
/* clear rx descriptor ring */
- memset((void *)di->rxd64, '\0',
- (di->nrxd * sizeof(struct dma64desc)));
+ memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));
/* DMA engine without alignment requirement requires table to be inited
* before enabling the engine
* buffer data. After it reaches the max size of buffer, the data continues
* in next DMA descriptor buffer WITHOUT DMA header
*/
-void *dma_rx(struct dma_pub *pub)
+struct sk_buff *dma_rx(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p, *head, *tail;
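
Per the comment above, a frame larger than one rx buffer is scattered across consecutive descriptors, only the first of which carries the DMA header, and dma_rx() gathers the pieces through the head/tail pointers declared here. A toy userspace model of that gathering, assuming a made-up buffer size and a plain singly linked node in place of struct sk_buff:

#include <stdio.h>
#include <stdlib.h>

#define BUFSZ 2048u                 /* per-descriptor rx buffer size (example value) */

struct buf {
	struct buf *next;
	unsigned int len;
};

static struct buf *collect_frame(unsigned int framelen)
{
	struct buf *head = NULL, *tail = NULL;
	unsigned int remaining = framelen;

	while (remaining > 0) {
		struct buf *p = calloc(1, sizeof(*p));

		if (!p)
			break;
		p->len = remaining > BUFSZ ? BUFSZ : remaining;
		remaining -= p->len;

		if (!head)
			head = p;       /* first buffer: carries the DMA header */
		else
			tail->next = p; /* continuation buffers: data only */
		tail = p;
	}
	return head;
}

int main(void)
{
	struct buf *p = collect_frame(5000);
	unsigned int nbuf = 0;

	while (p) {
		struct buf *next = p->next;

		nbuf++;
		free(p);
		p = next;
	}
	printf("frame spread over %u buffers\n", nbuf);
	return 0;
}
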
void dma_rxreclaim(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- void *p;
+ struct sk_buff *p;
DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
brcmu_pkt_buf_free_skb(p);
}
-static void *_dma_getnextrxp(struct dma_info *di, bool forceall)
+static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
if (di->nrxd == 0)
return NULL;
di->dma.txavail = di->ntxd - 1;
/* clear tx descriptor ring */
- memset((void *)di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));
+ memset(di->txd64, '\0', di->ntxd * sizeof(struct dma64desc));
/* DMA engine without alignment requirement requires table to be inited
* before enabling the engine
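
Setting txavail to ntxd - 1 above reflects a standard ring-buffer constraint: with only an "in" and an "out" index, in == out has to mean "empty", so one descriptor is always left unposted to keep "full" distinguishable from "empty". A standalone check of that counting argument; NTXD and occupied() are invented for the sketch.

#include <stdio.h>

#define NTXD 8

static unsigned int occupied(unsigned int in, unsigned int out)
{
	return (out - in + NTXD) % NTXD;    /* slots currently posted */
}

int main(void)
{
	unsigned int in = 0, out = 0, posted = 0;

	/* keep posting until advancing 'out' once more would make out == in */
	while ((out + 1) % NTXD != in) {
		out = (out + 1) % NTXD;
		posted++;
	}
	printf("posted %u of %u slots, occupied() = %u\n",
	       posted, NTXD, occupied(in, out));
	return 0;
}
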
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{
struct dma_info *di = (struct dma_info *)pub;
- void *p;
+ struct sk_buff *p;
DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
(range == DMA_RANGE_ALL) ? "all" :
* If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
* return associated packet regardless of the value of hardware pointers.
*/
-void *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
+struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{
struct dma_info *di = (struct dma_info *)pub;
u16 start, end, i;
u16 active_desc;
- void *txp;
+ struct sk_buff *txp;
DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
(range == DMA_RANGE_ALL) ? "all" :
return NULL;
}
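
As the comment above dma_getnexttxp() says, DMA_RANGE_ALL reclaims every descriptor posted to the ring regardless of the hardware pointers, while the other ranges are bounded by how far the engine reports it has progressed. A rough standalone model of that end-index choice; hw_curr and reclaim_count() are illustrative stand-ins, not driver names.

#include <stdio.h>
#include <stdbool.h>

#define NTXD 8

static unsigned int reclaim_count(unsigned int in, unsigned int out,
				  unsigned int hw_curr, bool all)
{
	unsigned int end = all ? out : hw_curr;    /* DMA_RANGE_ALL ignores hw_curr */

	return (end - in + NTXD) % NTXD;
}

int main(void)
{
	/* 5 descriptors posted (slots 2..6); hardware has finished through slot 3 */
	unsigned int in = 2, out = 7, hw_curr = 4;

	printf("range=all       -> %u reclaimable\n",
	       reclaim_count(in, out, hw_curr, true));
	printf("range=completed -> %u reclaimable\n",
	       reclaim_count(in, out, hw_curr, false));
	return 0;
}
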
-static void *dma64_getnextrxp(struct dma_info *di, bool forceall)
+static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
uint i, curr;
- void *rxp;
+ struct sk_buff *rxp;
unsigned long pa;
i = di->rxin;
uint nrxpost, uint rxoffset, uint *msg_level);
void dma_rxinit(struct dma_pub *pub);
-void *dma_rx(struct dma_pub *pub);
+struct sk_buff *dma_rx(struct dma_pub *pub);
bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
void dma_rxreclaim(struct dma_pub *pub);
void dma_detach(struct dma_pub *pub);
unsigned long dma_getvar(struct dma_pub *pub, const char *name);
-void *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
+struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range);
void dma_counterreset(struct dma_pub *pub);
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)