 * @nn: NFP Net device structure
 * @r_vec: Ring vector to be started
 */
-static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+static void
+nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
	unsigned int irq_vec;
-	int err = 0;
	irq_vec = nn->irq_entries[r_vec->irq_idx].vector;
	disable_irq(irq_vec);
-	err = nfp_net_rx_ring_bufs_alloc(r_vec->nfp_net, r_vec->rx_ring);
-	if (err) {
-		nn_err(nn, "RV%02d: couldn't allocate enough buffers\n",
-		       r_vec->irq_idx);
-		goto out;
-	}
	nfp_net_rx_ring_fill_freelist(r_vec->rx_ring);
-
	napi_enable(&r_vec->napi);
-out:
-	enable_irq(irq_vec);
-	return err;
+	enable_irq(irq_vec);
}
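
For readability, this is roughly what nfp_net_start_vec() reduces to once the hunk above is applied (a sketch reconstructed from the context and '+' lines; blank lines are approximate and the surrounding driver code is unchanged):

	static void
	nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
	{
		unsigned int irq_vec;

		irq_vec = nn->irq_entries[r_vec->irq_idx].vector;

		/* Keep the IRQ masked while the freelist is refilled and NAPI
		 * is enabled, so the vector is not serviced half-initialized.
		 */
		disable_irq(irq_vec);

		nfp_net_rx_ring_fill_freelist(r_vec->rx_ring);
		napi_enable(&r_vec->napi);

		enable_irq(irq_vec);
	}

With buffer allocation moved out of this path the function can no longer fail, hence the change of the return type from int to void and the removal of the out: label.
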
static int nfp_net_netdev_open(struct net_device *netdev)
		err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring);
		if (err)
			goto err_free_tx_ring_p;
+
+		err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
+		if (err)
+			goto err_flush_rx_ring_p;
	}
	err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
	 * - enable all TX queues
	 * - set link state
	 */
-	for (r = 0; r < nn->num_r_vecs; r++) {
-		err = nfp_net_start_vec(nn, &nn->r_vecs[r]);
-		if (err)
-			goto err_disable_napi;
-	}
+	for (r = 0; r < nn->num_r_vecs; r++)
+		nfp_net_start_vec(nn, &nn->r_vecs[r]);
	netif_tx_wake_all_queues(netdev);
	return 0;
-err_disable_napi:
-	while (r--) {
-		napi_disable(&nn->r_vecs[r].napi);
-		nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
-		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
-	}
err_clear_config:
	nfp_net_clear_config_and_disable(nn);
err_free_rings:
	r = nn->num_r_vecs;
err_free_prev_vecs:
	while (r--) {
+		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+err_flush_rx_ring_p:
		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
err_free_tx_ring_p:
		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
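
Taken together, the open-path hunks pair the per-vector allocation loop with an unwind ladder whose per-iteration labels (err_flush_rx_ring_p, err_free_tx_ring_p) sit inside the while (r--) loop: a failure on iteration r jumps to the label that frees only what that iteration had already allocated, falls through the rest of the loop body, and the loop condition then unwinds the earlier, fully set-up vectors. A rough sketch of the resulting shape (reconstructed from the hunks above; unrelated steps elided in comments):

	for (r = 0; r < nn->num_r_vecs; r++) {
		/* ... TX ring and RX descriptor ring allocation ... */
		err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring);
		if (err)
			goto err_free_tx_ring_p;

		err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
		if (err)
			goto err_flush_rx_ring_p;
	}

	/* ... ring configuration, nfp_net_start_vec() loop, return 0 ... */

err_clear_config:
	nfp_net_clear_config_and_disable(nn);
err_free_rings:
	r = nn->num_r_vecs;
err_free_prev_vecs:
	while (r--) {
		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
err_flush_rx_ring_p:
		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
err_free_tx_ring_p:
		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
		/* ... rest of the per-vector unwind elided ... */
	}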