* Add a buffer of the given length to the supplied HW and SW Rx
* descriptors.
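+ *
+ * Returns 0 on success, or -ENOMEM if the DMA mapping fails.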
*/
-static inline void add_one_rx_buf(void *va, unsigned int len,
- struct rx_desc *d, struct rx_sw_desc *sd,
- unsigned int gen, struct pci_dev *pdev)
+static inline int add_one_rx_buf(void *va, unsigned int len,
+ struct rx_desc *d, struct rx_sw_desc *sd,
+ unsigned int gen, struct pci_dev *pdev)
{
dma_addr_t mapping;

mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
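+ /* pci_map_single() can fail, e.g. when an IOMMU is in use; catch it
+  * here rather than handing the hardware a bogus bus address.
+  */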
+ if (unlikely(pci_dma_mapping_error(mapping)))
+     return -ENOMEM;
+
pci_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64)mapping >> 32);
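+ /* Write the buffer address before the generation bits below; the
+  * generation fields are what mark this descriptor valid to the HW.
+  */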
wmb();
d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
}
static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
* allocated with the supplied gfp flags. The caller must assure that
* @n does not exceed the queue's capacity.
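+ *
+ * Returns the number of buffers allocated.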
*/
-static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
void *buf_start;
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
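+ /* count of buffers successfully posted to the free list */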
+ unsigned int count = 0;
while (n--) {
+ int err;
+
if (q->use_pages) {
    if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
nomem:  q->alloc_failed++;
        break;
    }
    buf_start = sd->pg_chunk.va;
} else {
    struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

    if (!skb)
        goto nomem;

    sd->skb = skb;
    buf_start = skb->data;
}
- add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
- adap->pdev);
+ err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
+ adap->pdev);
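+ /* Mapping failed: free the skb we attached above; in page mode the
+  * chunk stays referenced by the sw descriptor.
+  */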
+ if (unlikely(err)) {
+     if (!q->use_pages) {
+         kfree_skb(sd->skb);
+         sd->skb = NULL;
+     }
+     break;
+ }
+
d++;
sd++;
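+ /* Wrap the producer index and flip the generation bit at the end
+  * of the descriptor ring.
+  */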
if (++q->pidx == q->size) {
    q->pidx = 0;
    q->gen ^= 1;
    sd = q->sdesc;
    d = q->desc;
}
q->credits++;
+ count++;
}
wmb();
- t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
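+ /* Ring the doorbell only if new buffers were actually posted */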
+ if (likely(count))
+ t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+ return count;
}
static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                      int irq_vec_idx, const struct qset_params *p,
int ntxq, struct net_device *dev)
{
- int i, ret = -ENOMEM;
+ int i, avail, ret = -ENOMEM;
struct sge_qset *q = &adapter->sge.qs[id];
init_qset_cntxt(q, id);
q->adap = adapter;
q->netdev = dev;
t3_update_qset_coalesce(q, p);
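+ /* Fill the free lists before enabling the queue set: FL0 must get
+  * at least one buffer or the qset is unusable, FL1 may start short.
+  */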
- refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
- refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+ avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
+ if (!avail) {
+     CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+     goto err;
+ }
+ if (avail < q->fl[0].size)
+     CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+             avail);
+
+ avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+ if (avail < q->fl[1].size)
+     CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+             avail);
+
refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
             V_NEWTIMER(q->rspq.holdoff_tmr));
mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
return 0;
- err_unlock:
+err_unlock:
spin_unlock_irq(&adapter->sge.reg_lock);
- err:
+err:
t3_free_qset(adapter, q);
return ret;
}