&(txring->dmaalloc),
GFP_ATOMIC);
- if (!txring->alloc) {
- txring->desc = NULL;
- txring->dmaalloc = 0;
- txring->dma = 0;
- return -ENOMEM;
- }
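+	/* On failure, unwind through the shared labels at the bottom */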
+ if (!txring->alloc)
+ goto err_set_null;
	/*
	 * 16 Bytes align
	 */
atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, jme->tx_ring_size);
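+	/*
+	 * bufinf is ordinary kernel memory (kmalloc), unlike the
+	 * DMA-coherent descriptor ring above, so it needs its own
+	 * allocation here and its own unwind step (err_free_txring).
+	 */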
+ txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ jme->tx_ring_size, GFP_ATOMIC);
+ if (unlikely(!(txring->bufinf)))
+ goto err_free_txring;
+
/*
* Initialize Transmit Descriptors
*/
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);
return 0;
+
+err_free_txring:
+ dma_free_coherent(&(jme->pdev->dev),
+ TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+ txring->alloc,
+ txring->dmaalloc);
+
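+	/*
+	 * err_free_txring releases the DMA ring, then falls through to
+	 * err_set_null, which resets the bookkeeping fields either way.
+	 */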
+err_set_null:
+ txring->desc = NULL;
+ txring->dmaalloc = 0;
+ txring->dma = 0;
+ txring->bufinf = NULL;
+
+ return -ENOMEM;
}
static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi;
if (txring->alloc) {
- for (i = 0 ; i < jme->tx_ring_size ; ++i) {
- txbi = txring->bufinf + i;
- if (txbi->skb) {
- dev_kfree_skb(txbi->skb);
- txbi->skb = NULL;
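+		/*
+		 * bufinf may be NULL if ring setup failed part-way,
+		 * so only walk and free it when it is present.
+		 */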
+ if (txring->bufinf) {
+ for (i = 0 ; i < jme->tx_ring_size ; ++i) {
+ txbi = txring->bufinf + i;
+ if (txbi->skb) {
+ dev_kfree_skb(txbi->skb);
+ txbi->skb = NULL;
+ }
+ txbi->mapping = 0;
+ txbi->len = 0;
+ txbi->nr_desc = 0;
+ txbi->start_xmit = 0;
}
- txbi->mapping = 0;
- txbi->len = 0;
- txbi->nr_desc = 0;
- txbi->start_xmit = 0;
+ kfree(txring->bufinf);
}
		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);
txring->desc = NULL;
txring->dmaalloc = 0;
txring->dma = 0;
+ txring->bufinf = NULL;
}
txring->next_to_use = 0;
atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, 0);
-
}
static inline void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);
if (rxring->alloc) {
- for (i = 0 ; i < jme->rx_ring_size ; ++i)
- jme_free_rx_buf(jme, i);
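+		/* As on the tx side, bufinf may be NULL after a failed setup */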
+ if (rxring->bufinf) {
+ for (i = 0 ; i < jme->rx_ring_size ; ++i)
+ jme_free_rx_buf(jme, i);
+ kfree(rxring->bufinf);
+ }
		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
rxring->desc = NULL;
rxring->dmaalloc = 0;
rxring->dma = 0;
+ rxring->bufinf = NULL;
}
rxring->next_to_use = 0;
atomic_set(&rxring->next_to_clean, 0);
	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
					   &(rxring->dmaalloc),
					   GFP_ATOMIC);
- if (!rxring->alloc) {
- rxring->desc = NULL;
- rxring->dmaalloc = 0;
- rxring->dma = 0;
- return -ENOMEM;
- }
+ if (!rxring->alloc)
+ goto err_set_null;
	/*
	 * 16 Bytes align
	 */
rxring->next_to_use = 0;
atomic_set(&rxring->next_to_clean, 0);
+ rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ jme->rx_ring_size, GFP_ATOMIC);
+ if (unlikely(!(rxring->bufinf)))
+ goto err_free_rxring;
+
/*
	 * Initialize Receive Descriptors
*/
+ memset(rxring->bufinf, 0,
+ sizeof(struct jme_buffer_info) * jme->rx_ring_size);
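+	/*
+	 * Zeroing bufinf up front matters: if jme_make_new_rx_buf() fails
+	 * below, jme_free_rx_resources() walks this array and must see
+	 * NULL skb pointers in the slots that were never filled.
+	 */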
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}
	}
return 0;
+
+err_free_rxring:
+ dma_free_coherent(&(jme->pdev->dev),
+ RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+ rxring->alloc,
+ rxring->dmaalloc);
+err_set_null:
+ rxring->desc = NULL;
+ rxring->dmaalloc = 0;
+ rxring->dma = 0;
+ rxring->bufinf = NULL;
+
+ return -ENOMEM;
}
static inline void