netif_addr_unlock_bh(efx->net_dev);
}
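+	/* Quiesce MAC stats fetches while the MAC is reconfigured */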
+ falcon_stop_nic_stats(efx);
falcon_deconfigure_mac_wrapper(efx);
/* Reconfigure the PHY, disabling transmit in mac level loopback. */
efx->mac_op->reconfigure(efx);
+ falcon_start_nic_stats(efx);
+
/* Inform kernel of loss/gain of carrier */
efx_link_status_changed(efx);
return;
efx->mac_op->reconfigure(efx);
efx->port_initialized = true;
- efx_stats_enable(efx);
mutex_unlock(&efx->mac_lock);
return 0;
if (!efx->port_initialized)
return;
- efx_stats_disable(efx);
efx->phy_op->fini(efx);
efx->port_initialized = false;
if (efx->state == STATE_RUNNING)
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
+
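+	/* Start the periodic MAC stats fetch now that the port is running */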
+ falcon_start_nic_stats(efx);
}
/* Flush all delayed work. Should only be called when no more delayed work
if (!efx->port_enabled)
return;
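+	/* Stop the periodic MAC stats fetch before disabling interrupts */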
+ falcon_stop_nic_stats(efx);
+
/* Disable interrupts and wait for ISR to complete */
falcon_disable_interrupts(efx);
if (efx->legacy_irq)
return 0;
}
-void efx_stats_disable(struct efx_nic *efx)
-{
- spin_lock(&efx->stats_lock);
- ++efx->stats_disable_count;
- spin_unlock(&efx->stats_lock);
-}
-
-void efx_stats_enable(struct efx_nic *efx)
-{
- spin_lock(&efx->stats_lock);
- --efx->stats_disable_count;
- spin_unlock(&efx->stats_lock);
-}
-
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct net_device_stats *stats = &net_dev->stats;
- /* Update stats if possible, but do not wait if another thread
- * is updating them or if MAC stats fetches are temporarily
- * disabled; slightly stale stats are acceptable.
- */
- if (!spin_trylock(&efx->stats_lock))
- return stats;
- if (!efx->stats_disable_count) {
- efx->mac_op->update_stats(efx);
- falcon_update_nic_stats(efx);
- }
- spin_unlock(&efx->stats_lock);
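+	/* Fold in any stats DMA that has completed since the last read;
+	 * the stats timer keeps these values fresh */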
+ spin_lock_bh(&efx->stats_lock);
+ falcon_update_nic_stats(efx);
+ spin_unlock_bh(&efx->stats_lock);
stats->rx_packets = mac_stats->rx_packets;
stats->tx_packets = mac_stats->tx_packets;
{
EFX_ASSERT_RESET_SERIALISED(efx);
- efx_stats_disable(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
mutex_lock(&efx->spi_lock);
mutex_unlock(&efx->spi_lock);
mutex_unlock(&efx->mac_lock);
- if (ok) {
+ if (ok)
efx_start_all(efx);
- efx_stats_enable(efx);
- }
return rc;
}
efx->rx_checksum_enabled = true;
spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
- efx->stats_disable_count = 1;
mutex_init(&efx->mac_lock);
efx->mac_op = &efx_dummy_mac_operations;
efx->phy_op = &efx_dummy_phy_operations;
goto fail4;
}
- /* Switch to the running state before we expose the device to
- * the OS. This is to ensure that the initial gathering of
- * MAC stats succeeds. */
+ /* Switch to the running state before we expose the device to the OS,
+	 * so that dev_open() or efx_start_all() will actually start the device */
efx->state = STATE_RUNNING;
rc = efx_register_netdev(efx);
#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
/* Ports */
-extern void efx_stats_disable(struct efx_nic *efx);
-extern void efx_stats_enable(struct efx_nic *efx);
extern void efx_reconfigure_port(struct efx_nic *efx);
extern void __efx_reconfigure_port(struct efx_nic *efx);
**************************************************************************
*/
-static int disable_dma_stats;
-
/* This is set to 16 for a good reason. In summary, if larger than
* 16, the descriptor cache holds more than a default socket
* buffer's worth of packets (for UDP we can only have at most one
/* MAC stats will fail whilst the TX fifo is draining. Serialise
* the drain sequence with the statistics fetch */
- efx_stats_disable(efx);
+ falcon_stop_nic_stats(efx);
efx_reado(efx, &reg, FR_AB_MAC_CTRL);
EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
udelay(10);
}
- efx_stats_enable(efx);
-
/* If we've reset the EM block and the link is up, then
* we'll have to kick the XAUI link so the PHY can recover */
if (efx->link_state.up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
falcon_reset_xaui(efx);
+ falcon_start_nic_stats(efx);
+
return 0;
}
efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
-int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
+static void falcon_stats_request(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
- u32 *dma_done;
- int i;
- if (disable_dma_stats)
- return 0;
+ WARN_ON(nic_data->stats_pending);
+ WARN_ON(nic_data->stats_disable_count);
- /* Statistics fetch will fail if the MAC is in TX drain */
- if (falcon_rev(efx) >= FALCON_REV_B0) {
- efx_oword_t temp;
- efx_reado(efx, &temp, FR_AB_MAC_CTRL);
- if (EFX_OWORD_FIELD(temp, FRF_BB_TXFIFO_DRAIN_EN))
- return 0;
- }
+ if (nic_data->stats_dma_done == NULL)
+		return; /* no MAC selected */
- dma_done = (efx->stats_buffer.addr + done_offset);
- *dma_done = FALCON_STATS_NOT_DONE;
+ *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
+ nic_data->stats_pending = true;
wmb(); /* ensure done flag is clear */
/* Initiate DMA transfer of stats */
efx->stats_buffer.dma_addr);
efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
- /* Wait for transfer to complete */
- for (i = 0; i < 400; i++) {
- if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
- rmb(); /* Ensure the stats are valid. */
- return 0;
- }
- udelay(10);
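+	/* The stats timer will pick up the result in roughly 500 ms */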
+ mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
+}
+
+static void falcon_stats_complete(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ if (!nic_data->stats_pending)
+ return;
+
+	nic_data->stats_pending = false;
+ if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
+ rmb(); /* read the done flag before the stats */
+ efx->mac_op->update_stats(efx);
+ } else {
+ EFX_ERR(efx, "timed out waiting for statistics\n");
}
+}
- EFX_ERR(efx, "timed out waiting for statistics\n");
- return -ETIMEDOUT;
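+
+/* Timer function: complete any pending statistics fetch and, if fetches
+ * are still enabled, start the next one */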
+static void falcon_stats_timer_func(unsigned long context)
+{
+ struct efx_nic *efx = (struct efx_nic *)context;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ spin_lock(&efx->stats_lock);
+
+ falcon_stats_complete(efx);
+ if (nic_data->stats_disable_count == 0)
+ falcon_stats_request(efx);
+
+ spin_unlock(&efx->stats_lock);
}
/**************************************************************************
int falcon_switch_mac(struct efx_nic *efx)
{
struct efx_mac_operations *old_mac_op = efx->mac_op;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ unsigned int stats_done_offset;
int rc = 0;
/* Don't try to fetch MAC stats while we're switching MACs */
- efx_stats_disable(efx);
+ falcon_stop_nic_stats(efx);
/* Internal loopbacks override the phy speed setting */
if (efx->loopback_mode == LOOPBACK_GMAC) {
efx->mac_op = (EFX_IS10G(efx) ?
&falcon_xmac_operations : &falcon_gmac_operations);
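+	/* The GMAC and XMAC signal DMA completion at different offsets in
+	 * the stats buffer; point stats_dma_done at the right flag */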
+ if (EFX_IS10G(efx))
+ stats_done_offset = XgDmaDone_offset;
+ else
+ stats_done_offset = GDmaDone_offset;
+ nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
+
if (old_mac_op == efx->mac_op)
goto out;
rc = falcon_reset_macs(efx);
out:
- efx_stats_enable(efx);
+ falcon_start_nic_stats(efx);
return rc;
}
goto fail6;
}
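+	/* Fetches start disabled; the first falcon_start_nic_stats()
+	 * enables them */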
+ nic_data->stats_disable_count = 1;
+ setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
+ (unsigned long)efx);
+
return 0;
fail6:
void falcon_update_nic_stats(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t cnt;
+ if (nic_data->stats_disable_count)
+ return;
+
efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
efx->n_rx_nodesc_drop_cnt +=
EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
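+	/* Opportunistically collect a stats DMA that has already
+	 * completed, rather than waiting for the timer */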
+ if (nic_data->stats_pending &&
+ *nic_data->stats_dma_done == FALCON_STATS_DONE) {
+ nic_data->stats_pending = false;
+ rmb(); /* read the done flag before the stats */
+ efx->mac_op->update_stats(efx);
+ }
+}
+
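+/* MAC statistics fetches run only while stats_disable_count is zero;
+ * stop/start calls nest, so each caller brackets the operation that
+ * would make a stats DMA fail, e.g. in falcon_reset_macs():
+ *
+ *	falcon_stop_nic_stats(efx);
+ *	... drain the TX FIFO ...
+ *	falcon_start_nic_stats(efx);
+ */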
+void falcon_start_nic_stats(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ spin_lock_bh(&efx->stats_lock);
+ if (--nic_data->stats_disable_count == 0)
+ falcon_stats_request(efx);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+void falcon_stop_nic_stats(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int i;
+
+ might_sleep();
+
+ spin_lock_bh(&efx->stats_lock);
+ ++nic_data->stats_disable_count;
+ spin_unlock_bh(&efx->stats_lock);
+
+ del_timer_sync(&nic_data->stats_timer);
+
+ /* Wait enough time for the most recent transfer to
+ * complete. */
+ for (i = 0; i < 4 && nic_data->stats_pending; i++) {
+ if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
+ break;
+ msleep(1);
+ }
+
+ spin_lock_bh(&efx->stats_lock);
+ falcon_stats_complete(efx);
+ spin_unlock_bh(&efx->stats_lock);
}
/**************************************************************************
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
+ * @stats_disable_count: Nest count for disabling statistics fetches
+ * @stats_pending: Whether a MAC statistics DMA is currently pending
+ * @stats_timer: Timer for periodically fetching MAC statistics
+ * @stats_dma_done: Pointer to the flag which indicates DMA completion
*/
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
+ unsigned int stats_disable_count;
+ bool stats_pending;
+ struct timer_list stats_timer;
+ u32 *stats_dma_done;
};
static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/* MAC/PHY */
extern int falcon_switch_mac(struct efx_nic *efx);
extern bool falcon_xaui_link_ok(struct efx_nic *efx);
-extern int falcon_dma_stats(struct efx_nic *efx,
- unsigned int done_offset);
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
extern void falcon_remove_nic(struct efx_nic *efx);
extern void falcon_update_nic_stats(struct efx_nic *efx);
+extern void falcon_start_nic_stats(struct efx_nic *efx);
+extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern void falcon_set_multicast_hash(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
* MAC stats accordingly. */
efx->phy_mode = new_mode;
if (new_mode & PHY_MODE_SPECIAL)
- efx_stats_disable(efx);
+ falcon_stop_nic_stats(efx);
if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001)
err = sfe4001_poweron(efx);
else
err = sfn4111t_reset(efx);
efx_reconfigure_port(efx);
if (!(new_mode & PHY_MODE_SPECIAL))
- efx_stats_enable(efx);
+ falcon_start_nic_stats(efx);
}
rtnl_unlock();
if (efx->phy_mode & PHY_MODE_SPECIAL) {
/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
* will fail. */
- efx_stats_disable(efx);
+ falcon_stop_nic_stats(efx);
}
rc = sfe4001_poweron(efx);
if (rc)
return;
efx->phy_mode = PHY_MODE_SPECIAL;
- efx_stats_disable(efx);
+ falcon_stop_nic_stats(efx);
}
sfn4111t_reset(efx);
if (efx->phy_mode & PHY_MODE_SPECIAL)
/* PHY may not generate a 156.25 MHz clock and MAC
* stats fetch will fail. */
- efx_stats_disable(efx);
+ falcon_stop_nic_stats(efx);
return 0;
struct efx_mac_stats *mac_stats = &efx->mac_stats;
unsigned long old_rx_pause, old_tx_pause;
unsigned long new_rx_pause, new_tx_pause;
- int rc;
-
- rc = falcon_dma_stats(efx, GDmaDone_offset);
- if (rc)
- return;
/* Pause frames are erroneously counted as errors (SFC bug 3269) */
old_rx_pause = mac_stats->rx_pause;
int falcon_reset_xaui(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
int count;
+ /* Don't fetch MAC statistics over an XMAC reset */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
/* Start reset sequence */
EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
/* XAUI link is expected to be down */
return;
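+	/* falcon_reset_xaui() insists that no stats DMA is in flight (see
+	 * the WARN_ON there), so quiesce stats around the link bashing */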
+ falcon_stop_nic_stats(efx);
+
while (!efx->mac_up && tries) {
EFX_LOG(efx, "bashing xaui\n");
falcon_reset_xaui(efx);
efx->mac_up = falcon_xaui_link_ok(efx);
--tries;
}
+
+ falcon_start_nic_stats(efx);
}
static void falcon_reconfigure_xmac(struct efx_nic *efx)
static void falcon_update_stats_xmac(struct efx_nic *efx)
{
struct efx_mac_stats *mac_stats = &efx->mac_stats;
- int rc;
-
- rc = falcon_dma_stats(efx, XgDmaDone_offset);
- if (rc)
- return;
/* Update MAC stats from DMAed values */
FALCON_STAT(efx, XgRxOctets, rx_bytes);
* &struct net_device_stats.
* @stats_buffer: DMA buffer for statistics
* @stats_lock: Statistics update lock. Serialises statistics fetches
- * @stats_disable_count: Nest count for disabling statistics fetches
* @mac_op: MAC interface
* @mac_address: Permanent MAC address
* @phy_type: PHY type
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
- unsigned int stats_disable_count;
struct efx_mac_operations *mac_op;
unsigned char mac_address[ETH_ALEN];
/* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
* a special software reset can glitch the XGMAC sufficiently for stats
* requests to fail. */
- efx_stats_disable(efx);
+ falcon_stop_nic_stats(efx);
/* Initiate reset */
reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
/* Wait for the XGXS state machine to churn */
mdelay(10);
out:
- efx_stats_enable(efx);
+ falcon_start_nic_stats(efx);
return rc;
}