/**
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
* @tx_ring: TX Ring structure to allocate
+ * @cnt: Ring buffer count (number of descriptors)
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
+static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
int sz;
- tx_ring->cnt = nn->txd_cnt;
+ tx_ring->cnt = cnt;
tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
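For context, the remainder of such a TX allocation is typically sized entirely from the now caller-supplied count; the sketch below restates the dma_zalloc_coherent() call shown above and is illustrative only — the txbufs/dma field names and the nfp_net_tx_ring_free() unwind are assumptions, not lines from this diff:

	/* Hedged sketch, not part of this patch: both the descriptor ring and
	 * the software TX buffer array are sized from tx_ring->cnt, which now
	 * comes from the caller rather than from nn->txd_cnt.
	 */
	tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
					    &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->txds)
		goto err_alloc;

	sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
	tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
	if (!tx_ring->txbufs)
		goto err_alloc;

	return 0;

err_alloc:
	nfp_net_tx_ring_free(tx_ring);	/* assumed cleanup helper */
	return -ENOMEM;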
* nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
* @rx_ring: RX ring to allocate
* @fl_bufsz: Size of buffers to allocate
+ * @cnt: Ring buffer count (number of descriptors)
*
* Return: 0 on success, negative errno otherwise.
*/
static int
-nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz)
+nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
+ u32 cnt)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
int sz;
- rx_ring->cnt = nn->rxd_cnt;
+ rx_ring->cnt = cnt;
rx_ring->bufsz = fl_bufsz;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
}
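The RX side mirrors this: cnt sizes the descriptor memory and the software buffer array, while fl_bufsz is only recorded for later buffer allocation. A minimal sketch of that continuation, with the rxbufs/dma names and the nfp_net_rx_ring_free() unwind assumed rather than taken from this diff:

	/* Hedged sketch of the RX continuation, not lines from this patch. */
	rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
					    &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->rxds)
		goto err_alloc;

	sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
	rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
	if (!rx_ring->rxbufs)
		goto err_alloc;

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);	/* assumed cleanup helper */
	return -ENOMEM;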
static struct nfp_net_rx_ring *
-nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz)
+nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
+ u32 buf_cnt)
{
struct nfp_net_rx_ring *rings;
unsigned int r;
for (r = 0; r < nn->num_rx_rings; r++) {
nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
- if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz))
+ if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
goto err_free_prev;
if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
if (err)
goto err_free_prev_vecs;
- err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring);
+ err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
if (err)
goto err_cleanup_vec_p;
err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
- nn->fl_bufsz);
+ nn->fl_bufsz, nn->rxd_cnt);
if (err)
goto err_free_tx_ring_p;
}
/* Prepare new rings */
- tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz);
+ tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
+ nn->rxd_cnt);
if (!tmp_rings)
return -ENOMEM;
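Since nfp_net_shadow_rx_rings_prepare() now takes the descriptor count explicitly, the same shadow-ring machinery can build replacement rings of a different size than the live ones. A hedged usage sketch follows; nfp_net_set_ring_size() is a hypothetical caller and the quiesce/swap/free steps are deliberately elided, none of this is part of the excerpt above:

/* Hypothetical caller, illustrative only: build shadow RX rings at a new
 * descriptor count, then (details omitted) swap them in for the old set.
 */
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 new_rxd_cnt)
{
	struct nfp_net_rx_ring *tmp_rings;

	tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
						    new_rxd_cnt);
	if (!tmp_rings)
		return -ENOMEM;

	/* ...quiesce the device, swap nn->rx_rings with tmp_rings, restart,
	 * then free whichever set ends up in tmp_rings (helpers assumed,
	 * not shown in this diff)...
	 */

	nn->rxd_cnt = new_rxd_cnt;
	return 0;
}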