u32 rx_buf_failed;
u32 rx_page_failed;
- /* These are arrays of rings, allocated at run-time */
- struct i40e_ring *rx_rings;
- struct i40e_ring *tx_rings;
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+ struct i40e_ring **tx_rings;
u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write.
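With rx_rings and tx_rings turned into struct i40e_ring ** members, each ring becomes an individually allocated object that can be freed (and RCU-deferred) on its own, while the VSI keeps one contiguous block of pointers for indexing. A rough sketch of the layout the later hunks build, where N stands for vsi->alloc_queue_pairs:

/*
 *  One kzalloc() of 2 * N ring pointers:
 *
 *    vsi->tx_rings --> [ tx[0] ... tx[N-1] | rx[0] ... rx[N-1] ]
 *    vsi->rx_rings ------------------------^
 *
 *  Each tx[i] points at a two-element kzalloc() of struct i40e_ring,
 *  and rx[i] = &tx[i][1], so freeing tx[i] releases the whole pair.
 */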
for (i = 0; i < vsi->num_queue_pairs; i++) {
len = sizeof(struct i40e_tx_buffer);
- memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ memcpy(p, vsi->tx_rings[i]->tx_bi, len);
p += len;
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
len = sizeof(struct i40e_rx_buffer);
- memcpy(p, vsi->rx_rings[i].rx_bi, len);
+ memcpy(p, vsi->rx_rings[i]->rx_bi, len);
p += len;
}
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
- if (vsi->rx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: desc = %p\n",
- i, vsi->rx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
- i, vsi->rx_rings[i].dev,
- vsi->rx_rings[i].netdev,
- vsi->rx_rings[i].rx_bi);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->rx_rings[i].state,
- vsi->rx_rings[i].queue_index,
- vsi->rx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, vsi->rx_rings[i].rx_hdr_len,
- vsi->rx_rings[i].rx_buf_len,
- vsi->rx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->rx_rings[i].hsplit,
- vsi->rx_rings[i].next_to_use,
- vsi->rx_rings[i].next_to_clean,
- vsi->rx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
- i, vsi->rx_rings[i].stats.packets,
- vsi->rx_rings[i].stats.bytes,
- vsi->rx_rings[i].rx_stats.non_eop_descs);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
- i,
- vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
- vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->rx_rings[i].size,
- (long unsigned int)vsi->rx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->rx_rings[i].vsi,
- vsi->rx_rings[i].q_vector);
- }
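+	/* rings can be freed via kfree_rcu() while we dump them; hold the
+	 * RCU read lock and re-check each ring pointer before use
+	 */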
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+
+		if (!rx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, rx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, rx_ring->dev,
+ rx_ring->netdev,
+ rx_ring->rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, rx_ring->state,
+ rx_ring->queue_index,
+ rx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, rx_ring->rx_hdr_len,
+ rx_ring->rx_buf_len,
+ rx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, rx_ring->hsplit,
+ rx_ring->next_to_use,
+ rx_ring->next_to_clean,
+ rx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, rx_ring->stats.packets,
+ rx_ring->stats.bytes,
+ rx_ring->rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ i,
+ rx_ring->rx_stats.alloc_rx_page_failed,
+ rx_ring->rx_stats.alloc_rx_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, rx_ring->size,
+ (long unsigned int)rx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, rx_ring->vsi,
+ rx_ring->q_vector);
}
- if (vsi->tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: desc = %p\n",
- i, vsi->tx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
- i, vsi->tx_rings[i].dev,
- vsi->tx_rings[i].netdev,
- vsi->tx_rings[i].tx_bi);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->tx_rings[i].state,
- vsi->tx_rings[i].queue_index,
- vsi->tx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, vsi->tx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->tx_rings[i].hsplit,
- vsi->tx_rings[i].next_to_use,
- vsi->tx_rings[i].next_to_clean,
- vsi->tx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
- i, vsi->tx_rings[i].stats.packets,
- vsi->tx_rings[i].stats.bytes,
- vsi->tx_rings[i].tx_stats.restart_queue);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
- i,
- vsi->tx_rings[i].tx_stats.tx_busy,
- vsi->tx_rings[i].tx_stats.tx_done_old);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->tx_rings[i].size,
- (long unsigned int)vsi->tx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->tx_rings[i].vsi,
- vsi->tx_rings[i].q_vector);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: DCB tc = %d\n",
- i, vsi->tx_rings[i].dcb_tc);
- }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+
+		if (!tx_ring)
+			continue;
+
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, tx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, tx_ring->dev,
+ tx_ring->netdev,
+ tx_ring->tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, tx_ring->state,
+ tx_ring->queue_index,
+ tx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, tx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, tx_ring->hsplit,
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, tx_ring->stats.packets,
+ tx_ring->stats.bytes,
+ tx_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ i,
+ tx_ring->tx_stats.tx_busy,
+ tx_ring->tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, tx_ring->size,
+ (long unsigned int)tx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, tx_ring->vsi,
+ tx_ring->q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, tx_ring->dcb_tc);
}
+ rcu_read_unlock();
dev_info(&pf->pdev->dev,
" work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
vsi->work_limit, vsi->rx_itr_setting,
return;
}
if (is_rx_ring)
- ring = vsi->rx_rings[ring_id];
+ ring = *vsi->rx_rings[ring_id];
else
- ring = vsi->tx_rings[ring_id];
+ ring = *vsi->tx_rings[ring_id];
if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = vsi->rx_rings[0].count;
- ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_pending = vsi->rx_rings[0]->count;
+ ring->tx_pending = vsi->tx_rings[0]->count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == vsi->tx_rings[0].count) &&
- (new_rx_count == vsi->rx_rings[0].count))
+ if ((new_tx_count == vsi->tx_rings[0]->count) &&
+ (new_rx_count == vsi->rx_rings[0]->count))
return 0;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- vsi->tx_rings[i].count = new_tx_count;
- vsi->rx_rings[i].count = new_rx_count;
+ vsi->tx_rings[i]->count = new_tx_count;
+ vsi->rx_rings[i]->count = new_rx_count;
}
goto done;
}
*/
/* alloc updated Tx resources */
- if (new_tx_count != vsi->tx_rings[0].count) {
+ if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
- vsi->tx_rings[0].count, new_tx_count);
+ vsi->tx_rings[0]->count, new_tx_count);
tx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
}
/* alloc updated Rx resources */
- if (new_rx_count != vsi->rx_rings[0].count) {
+ if (new_rx_count != vsi->rx_rings[0]->count) {
netdev_info(netdev,
"Changing Rx descriptor count from %d to %d\n",
- vsi->rx_rings[0].count, new_rx_count);
+ vsi->rx_rings[0]->count, new_rx_count);
rx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(&vsi->tx_rings[i]);
- vsi->tx_rings[i] = tx_rings[i];
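+			/* free old descriptor memory, then struct-copy the new
+			 * ring in place so pointers held by readers stay valid
+			 */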
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
}
kfree(tx_rings);
tx_rings = NULL;
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_rx_resources(&vsi->rx_rings[i]);
- vsi->rx_rings[i] = rx_rings[i];
+ i40e_free_rx_resources(vsi->rx_rings[i]);
+ *vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
rx_rings = NULL;
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
- data[i] = vsi->tx_rings[j].stats.packets;
- data[i + 1] = vsi->tx_rings[j].stats.bytes;
- data[i + 2] = vsi->rx_rings[j].stats.packets;
- data[i + 3] = vsi->rx_rings[j].stats.bytes;
+ data[i] = vsi->tx_rings[j]->stats.packets;
+ data[i + 1] = vsi->tx_rings[j]->stats.bytes;
+ data[i + 2] = vsi->rx_rings[j]->stats.packets;
+ data[i + 3] = vsi->rx_rings[j]->stats.bytes;
}
if (vsi == pf->vsi[pf->lan_vsi]) {
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
if (vsi->rx_rings)
for (i = 0; i < vsi->num_queue_pairs; i++) {
- memset(&vsi->rx_rings[i].stats, 0 ,
- sizeof(vsi->rx_rings[i].stats));
- memset(&vsi->rx_rings[i].rx_stats, 0 ,
- sizeof(vsi->rx_rings[i].rx_stats));
- memset(&vsi->tx_rings[i].stats, 0 ,
- sizeof(vsi->tx_rings[i].stats));
- memset(&vsi->tx_rings[i].tx_stats, 0,
- sizeof(vsi->tx_rings[i].tx_stats));
+			memset(&vsi->rx_rings[i]->stats, 0,
+			       sizeof(vsi->rx_rings[i]->stats));
+			memset(&vsi->rx_rings[i]->rx_stats, 0,
+			       sizeof(vsi->rx_rings[i]->rx_stats));
+			memset(&vsi->tx_rings[i]->stats, 0,
+			       sizeof(vsi->tx_rings[i]->stats));
+			memset(&vsi->tx_rings[i]->tx_stats, 0,
+			       sizeof(vsi->tx_rings[i]->tx_stats));
}
vsi->stat_offsets_loaded = false;
}
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
}
}
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
tc = ring->dcb_tc;
if (xoff[tc])
for (q = 0; q < vsi->num_queue_pairs; q++) {
struct i40e_ring *p;
- p = &vsi->rx_rings[q];
+ p = vsi->rx_rings[q];
rx_b += p->stats.bytes;
rx_p += p->stats.packets;
rx_buf += p->rx_stats.alloc_rx_buff_failed;
rx_page += p->rx_stats.alloc_rx_page_failed;
- p = &vsi->tx_rings[q];
+ p = vsi->tx_rings[q];
tx_b += p->stats.bytes;
tx_p += p->stats.packets;
tx_restart += p->tx_stats.restart_queue;
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+ err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
return err;
}
int i;
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i].desc)
- i40e_free_tx_resources(&vsi->tx_rings[i]);
+ if (vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
}
/**
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+ err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
return err;
}
int i;
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i].desc)
- i40e_free_rx_resources(&vsi->rx_rings[i]);
+ if (vsi->rx_rings[i]->desc)
+ i40e_free_rx_resources(vsi->rx_rings[i]);
}
/**
int err = 0;
u16 i;
- for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
- err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->tx_rings[i]);
return err;
}
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+ err = i40e_configure_rx_ring(vsi->rx_rings[i]);
return err;
}
qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ struct i40e_ring *rx_ring = vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n;
}
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
}
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct i40e_ring *tx_ring = &(vsi->tx_rings[qp_idx]);
- struct i40e_ring *rx_ring = &(vsi->rx_rings[qp_idx]);
+ struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+ struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_clean_tx_ring(&vsi->tx_rings[i]);
- i40e_clean_rx_ring(&vsi->rx_rings[i]);
+ i40e_clean_tx_ring(vsi->tx_rings[i]);
+ i40e_clean_rx_ring(vsi->rx_rings[i]);
}
}
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- set_check_for_tx_hang(&vsi->tx_rings[i]);
+ set_check_for_tx_hang(vsi->tx_rings[i]);
if (test_bit(__I40E_HANG_CHECK_ARMED,
- &vsi->tx_rings[i].state))
+ &vsi->tx_rings[i]->state))
armed++;
}
int ret = -ENODEV;
struct i40e_vsi *vsi;
int sz_vectors;
+ int sz_rings;
int vsi_idx;
int i;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
- i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_set_num_rings_in_vsi(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* allocate memory for ring pointers */
+ sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+ if (!vsi->tx_rings) {
+ ret = -ENOMEM;
+ goto err_rings;
+ }
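+	/* Rx ring pointers occupy the second half of the same block */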
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
/* allocate memory for q_vector pointers */
	sz_vectors = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
goto unlock_pf;
err_vectors:
+ kfree(vsi->tx_rings);
+err_rings:
pf->next_vsi = i - 1;
kfree(vsi);
unlock_pf:
/* free the ring and vector containers */
kfree(vsi->q_vectors);
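+	/* rx_rings points into the tx_rings block, so one kfree covers both */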
+ kfree(vsi->tx_rings);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
return 0;
}
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ int i;
+
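+	/* each Tx ring was allocated together with its Rx twin, so freeing
+	 * tx_rings[i] releases both; defer it past in-flight RCU readers
+	 */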
+	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+		/* slots past a failed allocation are still NULL */
+		if (vsi->tx_rings[i])
+			kfree_rcu(vsi->tx_rings[i], rcu);
+		vsi->tx_rings[i] = NULL;
+		vsi->rx_rings[i] = NULL;
+	}
+}
+
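The kfree_rcu() above pairs with the rcu_read_lock()/ACCESS_ONCE() readers in the debugfs dump loop: a reader that snapshots a ring pointer inside the read-side critical section can keep using it even if this function clears the slot concurrently, because the memory is only reclaimed after a grace period. A minimal sketch of that pattern, with a hypothetical struct obj and global slot rather than driver code:

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	int val;
	struct rcu_head rcu;		/* storage for the deferred free */
};

static struct obj __rcu *slot;

static void reader(void)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(slot);	/* snapshot once, like ACCESS_ONCE() */
	if (o)
		pr_info("val = %d\n", o->val);
	rcu_read_unlock();
}

static void updater(struct obj *replacement)
{
	struct obj *old;

	old = rcu_dereference_protected(slot, 1);  /* caller serializes updaters */
	rcu_assign_pointer(slot, replacement);
	if (old)
		kfree_rcu(old, rcu);	/* freed only after all readers drop out */
}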
/**
* i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being configured
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int ret = 0;
int i;
- vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->rx_rings) {
- ret = -ENOMEM;
- goto err_alloc_rings;
- }
-
- vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
- kfree(vsi->rx_rings);
- goto err_alloc_rings;
- }
-
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
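+		/* allocate the Tx and Rx rings for this queue pair as a
+		 * single two-element block; see &tx_ring[1] below
+		 */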
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
tx_ring->queue_index = i;
tx_ring->reg_idx = vsi->base_queue + i;
tx_ring->count = vsi->num_desc;
tx_ring->size = 0;
tx_ring->dcb_tc = 0;
+ vsi->tx_rings[i] = tx_ring;
+ rx_ring = &tx_ring[1];
rx_ring->queue_index = i;
rx_ring->reg_idx = vsi->base_queue + i;
rx_ring->ring_active = false;
set_ring_16byte_desc_enabled(rx_ring);
else
clear_ring_16byte_desc_enabled(rx_ring);
- }
-
-err_alloc_rings:
- return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
- if (vsi) {
- kfree(vsi->rx_rings);
- kfree(vsi->tx_rings);
+ vsi->rx_rings[i] = rx_ring;
}
return 0;
+
+err_out:
+ i40e_vsi_clear_rings(vsi);
+ return -ENOMEM;
}
/**
if (!vsi)
return -ENOENT;
- tx_ring = &vsi->tx_rings[0];
+ tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev;
dma = dma_map_single(dev, fdir_data->raw_packet,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+ struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works
* beyond this point
struct i40e_vsi *vsi; /* Backreference to associated VSI */
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {