* We will need one channel per interrupt.
*/
wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
- efx->rss_queues = min(wanted_ints, max_channels);
+ efx->n_rx_queues = min(wanted_ints, max_channels);
- for (i = 0; i < efx->rss_queues; i++)
+ for (i = 0; i < efx->n_rx_queues; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
+ rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
if (rc > 0) {
- EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
- efx->rss_queues = rc;
+ EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
+ efx->n_rx_queues = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
- efx->rss_queues);
+ efx->n_rx_queues);
}
if (rc == 0) {
- for (i = 0; i < efx->rss_queues; i++)
+ for (i = 0; i < efx->n_rx_queues; i++)
efx->channel[i].irq = xentries[i].vector;
} else {
/* Fall back to single channel MSI */
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->rss_queues = 1;
+ efx->n_rx_queues = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->rss_queues = 1;
+ efx->n_rx_queues = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
}
efx->legacy_irq = 0;
}
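A note on the retry above: with the old pci_enable_msix() API a positive return value is not an error code but the number of vectors that were actually available, so the driver shrinks n_rx_queues to that count and retries once before falling back to single-channel MSI and finally legacy interrupts. A minimal userspace sketch of that negotiation, with a hypothetical fake_enable_msix() standing in for the PCI call:

#include <stdio.h>

#define MAX_VECTORS 4	/* pretend the platform only has 4 MSI-X vectors */

/* Hypothetical stand-in for pci_enable_msix(): returns 0 on success,
 * or the number of vectors actually available when asked for too many. */
static int fake_enable_msix(int nvec)
{
	return (nvec <= MAX_VECTORS) ? 0 : MAX_VECTORS;
}

int main(void)
{
	int n_rx_queues = 8;	/* what the driver would like */
	int rc = fake_enable_msix(n_rx_queues);

	if (rc > 0) {		/* fewer vectors than requested */
		n_rx_queues = rc;
		rc = fake_enable_msix(n_rx_queues);
	}
	if (rc == 0)
		printf("MSI-X enabled with %d channels\n", n_rx_queues);
	else
		printf("falling back to single-channel MSI or legacy\n");
	return 0;
}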
-/* Select number of used resources
- * Should be called after probe_interrupts()
- */
-static void efx_select_used(struct efx_nic *efx)
+static void efx_set_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- int i;
efx_for_each_tx_queue(tx_queue, efx) {
if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
tx_queue->channel->used_flags |= EFX_USED_BY_TX;
}
- /* RX queues. Each has a dedicated channel. */
- for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
- rx_queue = &efx->rx_queue[i];
-
- if (i < efx->rss_queues) {
- rx_queue->used = true;
- /* If we allow multiple RX queues per channel
- * we need to decide that here
- */
- rx_queue->channel = &efx->channel[rx_queue->queue];
- rx_queue->channel->used_flags |= EFX_USED_BY_RX;
- rx_queue++;
- }
+ efx_for_each_rx_queue(rx_queue, efx) {
+ rx_queue->channel = &efx->channel[rx_queue->queue];
+ rx_queue->channel->used_flags |= EFX_USED_BY_RX;
}
}
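efx_set_channels() no longer has to mark queues as used: every RX queue has a dedicated channel, so the mapping is simply queue i onto channel i for the first n_rx_queues entries. A rough stand-alone sketch of that layout, using simplified structures rather than the driver's own:

#include <stdio.h>

#define EFX_USED_BY_RX 1
#define MAX_QUEUES 8

struct channel { int used_flags; };
struct rx_queue { int queue; struct channel *channel; };

static struct channel channel[MAX_QUEUES];
static struct rx_queue rx_queue[MAX_QUEUES];

int main(void)
{
	int n_rx_queues = 4, i;

	/* one-to-one mapping: RX queue i is serviced by channel i */
	for (i = 0; i < n_rx_queues; i++) {
		rx_queue[i].queue = i;
		rx_queue[i].channel = &channel[i];
		rx_queue[i].channel->used_flags |= EFX_USED_BY_RX;
	}

	for (i = 0; i < n_rx_queues; i++)
		printf("rx_queue[%d] -> channel[%d]\n",
		       i, (int)(rx_queue[i].channel - channel));
	return 0;
}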
* in MSI-X interrupts. */
efx_probe_interrupts(efx);
- /* Determine number of RX queues and TX queues */
- efx_select_used(efx);
+ efx_set_channels(efx);
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
offset < RX_RSS_INDIR_TBL_B0 + 0x800;
offset += 0x10) {
EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
- i % efx->rss_queues);
+ i % efx->n_rx_queues);
falcon_writel(efx, &dword, offset);
i++;
}
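The loop above fills the RSS indirection table round-robin: entry j holds j % n_rx_queues, so hash buckets are spread as evenly as possible across the active RX queues. Assuming 128 entries (0x800 bytes of registers at a 0x10 stride), the resulting distribution can be reproduced in plain C:

#include <stdio.h>

int main(void)
{
	int n_rx_queues = 3;	/* example: three active RX queues */
	int table[128];		/* one slot per indirection-table entry */
	int i, j;

	for (i = 0; i < 128; i++)
		table[i] = i % n_rx_queues;	/* 0,1,2,0,1,2,... */

	/* count how many hash buckets land on each queue */
	for (i = 0; i < n_rx_queues; i++) {
		int hits = 0;
		for (j = 0; j < 128; j++)
			hits += (table[j] == i);
		printf("queue %d gets %d of 128 buckets\n", i, hits);
	}
	return 0;
}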
if (falcon_rev(efx) >= FALCON_REV_B0)
EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
else
- EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
+ EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->n_rx_queues - 1);
if (EFX_WORKAROUND_7244(efx)) {
EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
* struct efx_rx_queue - An Efx RX queue
* @efx: The associated Efx NIC
* @queue: DMA queue number
- * @used: Queue is used by net driver
* @channel: The associated channel
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
struct efx_rx_queue {
struct efx_nic *efx;
int queue;
- bool used;
struct efx_channel *channel;
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
- * @rss_queues: Number of RSS queues
+ * @n_rx_queues: Number of RX queues
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @irq_status: Interrupt status buffer
struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
struct efx_channel channel[EFX_MAX_CHANNELS];
- int rss_queues;
+ int n_rx_queues;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
/* Iterate over all used RX queues */
#define efx_for_each_rx_queue(_rx_queue, _efx) \
for (_rx_queue = &_efx->rx_queue[0]; \
- _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \
- _rx_queue++) \
- if (!_rx_queue->used) \
- continue; \
- else
+ _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \
+ _rx_queue++)
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
for (_rx_queue = &_channel->efx->rx_queue[0]; \
_rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \
_rx_queue++) \
- if ((!_rx_queue->used) || \
- (_rx_queue->channel != _channel)) \
+ if (_rx_queue->channel != _channel) \
continue; \
else
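The per-channel iterator keeps the if/continue/else construction: the `continue` skips queues belonging to other channels, and the dangling `else` binds the caller's loop body so the macro can still be followed by a single statement. A cut-down, self-contained version of the idiom (simplified structures and a fixed array size, not the driver's definitions) behaves the same way:

#include <stdio.h>

#define N_RX_QUEUES 4

struct channel { int dummy; };
struct rx_queue { int queue; struct channel *channel; };

static struct channel chan[2];
static struct rx_queue rxq[N_RX_QUEUES];

/* Filter entries while still accepting a single-statement loop body:
 * "continue" skips foreign queues, the trailing "else" binds the body. */
#define for_each_channel_rx_queue(_rxq, _ch)			\
	for (_rxq = &rxq[0]; _rxq < &rxq[N_RX_QUEUES]; _rxq++)	\
		if ((_rxq)->channel != (_ch))			\
			continue;				\
		else

int main(void)
{
	struct rx_queue *q;
	int i;

	for (i = 0; i < N_RX_QUEUES; i++) {
		rxq[i].queue = i;
		rxq[i].channel = &chan[i % 2];	/* 0,2 -> chan[0]; 1,3 -> chan[1] */
	}

	for_each_channel_rx_queue(q, &chan[0])
		printf("channel 0 services RX queue %d\n", q->queue);
	return 0;
}

The explicit else ensures the macro always expands to a complete if/else, so the queue filter cannot capture a stray else written after the loop body in caller code.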
/* Allocate RX buffers */
rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
- if (!rx_queue->buffer) {
- rc = -ENOMEM;
- goto fail1;
- }
+ if (!rx_queue->buffer)
+ return -ENOMEM;
rc = falcon_probe_rx(rx_queue);
- if (rc)
- goto fail2;
-
- return 0;
-
- fail2:
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
- fail1:
- rx_queue->used = 0;
-
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
return rc;
}
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
- rx_queue->used = 0;
}
void efx_flush_lro(struct efx_channel *channel)