if (rc)
goto fail;
+ efx->type->dimension_resources(efx);
+
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
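The hunk above is in efx_probe_nic() (efx.c). The placement matters:
efx_probe_interrupts() has just settled n_channels, n_rx_channels and
n_tx_channels according to the interrupt resources obtained, and only
then can the new hook carve up on-chip SRAM per channel. A minimal
sketch of that ordering (probe_order_sketch() is hypothetical, not part
of the patch):

static int probe_order_sketch(struct efx_nic *efx)
{
	/* Settles efx->n_channels and friends first... */
	int rc = efx_probe_interrupts(efx);

	if (rc)
		return rc;
	/* ...so the per-channel SRAM arithmetic sees final counts. */
	efx->type->dimension_resources(efx);
	return 0;
}

The falcon.c hunks that follow add Falcon's implementation of the hook
and drop the old constants from its two nic_type definitions: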
return rc;
}
+static void falcon_dimension_resources(struct efx_nic *efx)
+{
+ efx->rx_dc_base = 0x20000;
+ efx->tx_dc_base = 0x26000;
+}
+
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
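Falcon's SRAM layout is fixed, so its hook just hard-codes the legacy
bases. Note the unit change: the old per-type constants (removed below)
were byte addresses, while the new per-device fields hold qword
addresses; one efx_qword_t is 8 bytes, so 0x100000 / 8 = 0x20000 (RX)
and 0x130000 / 8 = 0x26000 (TX). That is also why the "/ 8" disappears
from the FR_AZ_SRM_*_DC_CFG writes in efx_nic_init_common() further
down. A compile-time check of the conversion (editor's sketch, not part
of the patch):

#if (0x100000 / 8) != 0x20000 || (0x130000 / 8) != 0x26000
#error "Falcon qword bases do not match the old byte addresses"
#endif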
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
+ .dimension_resources = falcon_dimension_resources,
.fini = efx_port_dummy_op_void,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
.max_interrupt_mode = EFX_INT_MODE_MSI,
.phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
- .tx_dc_base = 0x130000,
- .rx_dc_base = 0x100000,
.offload_features = NETIF_F_IP_CSUM,
};
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
+ .dimension_resources = falcon_dimension_resources,
.fini = efx_port_dummy_op_void,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
- .tx_dc_base = 0x130000,
- .rx_dc_base = 0x100000,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
};
* @extra_channel_type: Types of extra (non-traffic) channels that
* should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
+ * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
+ * @sram_lim_qw: Qword address limit of SRAM
* @next_buffer_table: First available buffer table id
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
unsigned rxq_entries;
unsigned txq_entries;
+ unsigned tx_dc_base;
+ unsigned rx_dc_base;
+ unsigned sram_lim_qw;
unsigned next_buffer_table;
unsigned n_channels;
unsigned n_rx_channels;
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
* @init: Initialise the controller
+ * @dimension_resources: Dimension controller resources (buffer table,
+ *	descriptor caches and VIs), once the available interrupt
+ *	resources are known
* @fini: Shut down the controller
* @monitor: Periodic function for polling link state and hardware monitor
* @map_reset_reason: Map ethtool reset reason to a reset method
* @phys_addr_channels: Number of channels with physically addressed
* descriptors
* @timer_period_max: Maximum period of interrupt timer (in ticks)
- * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
- * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
*/
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
int (*init)(struct efx_nic *efx);
+ void (*dimension_resources)(struct efx_nic *efx);
void (*fini)(struct efx_nic *efx);
void (*monitor)(struct efx_nic *efx);
enum reset_type (*map_reset_reason)(enum reset_type reason);
unsigned int max_interrupt_mode;
unsigned int phys_addr_channels;
unsigned int timer_period_max;
- unsigned int tx_dc_base;
- unsigned int rx_dc_base;
netdev_features_t offload_features;
};
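Taken together, the two net_driver.h hunks move tx_dc_base and
rx_dc_base from the const, per-type struct efx_nic_type into the
per-device struct efx_nic, so the bases can be computed at probe time
instead of being baked-in constants. Accesses change accordingly (see
the efx_nic_init_common() hunk below):

/*   before: efx->type->tx_dc_base / 8   (const byte address)
 *   after:  efx->tx_dc_base             (computed qword address)
 */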
free_irq(efx->legacy_irq, efx);
}
+void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
+{
+ unsigned vi_count, buftbl_min;
+
+ /* Account for the buffer table entries backing the datapath channels
+ * and the descriptor caches for those channels.
+ */
+ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
+ efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ efx->n_channels * EFX_MAX_EVQ_SIZE)
+ * sizeof(efx_qword_t) / EFX_BUF_SIZE);
+ vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+
+	/* The buffer table grows up from address zero while the descriptor
+	 * caches grow down from the SRAM limit; the two must not collide.
+	 */
+	WARN_ON(buftbl_min > efx->rx_dc_base);
+}
+
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
efx_oword_t altera_build;
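A worked example of the carve-up in efx_nic_dimension_resources()
(editor's sketch with assumed values; TX_DC_ENTRIES and RX_DC_ENTRIES
are the driver's existing descriptor-cache sizes defined earlier in
nic.c, and EFX_TXQ_TYPES is assumed to be 4):

/* 4 channels, all used for both RX and TX:
 *
 *   vi_count   = max(4, 4 * EFX_TXQ_TYPES) = 16
 *   tx_dc_base = sram_lim_qw - 16 * TX_DC_ENTRIES
 *   rx_dc_base = tx_dc_base  - 16 * RX_DC_ENTRIES
 *
 * The descriptor caches are stacked at the top of SRAM, growing down,
 * and everything below rx_dc_base remains available for the buffer
 * table.
 */

The computed bases are then consumed by efx_nic_init_common():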
void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;
/* Set positions of descriptor caches in SRAM. */
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
- efx->type->tx_dc_base / 8);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
- efx->type->rx_dc_base / 8);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
/* Set TX descriptor cache size. */
extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
+extern void
+efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
extern void efx_nic_init_common(struct efx_nic *efx);
extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
return rc;
}
+static void siena_dimension_resources(struct efx_nic *efx)
+{
+ /* Each port has a small block of internal SRAM dedicated to
+ * the buffer table and descriptor caches. In theory we can
+ * map both blocks to one port, but we don't.
+ */
+ efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+}
+
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
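Passing FR_CZ_BUF_FULL_TBL_ROWS / 2 works as a qword address limit
because each full-mode buffer table entry occupies one qword of SRAM:
"half the rows" is therefore also the highest qword address this port
may use. For contrast, the "in theory" case the comment mentions, with
both SRAM blocks mapped to one port, would look like this hypothetical
variant (illustrative only, not in the driver):

static void siena_dimension_resources_one_port(struct efx_nic *efx)
{
	/* Hypothetical: dedicate the whole buffer table to one port. */
	efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS);
}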
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
+ .dimension_resources = siena_dimension_resources,
.fini = efx_port_dummy_op_void,
.monitor = NULL,
.map_reset_reason = siena_map_reset_reason,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
- .tx_dc_base = 0x88000,
- .rx_dc_base = 0x68000,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
};