 	cancel_delayed_work_sync(&efx->monitor_work);
 	/* Ensure that all RX slow refills are complete. */
-	efx_for_each_rx_queue(rx_queue, efx) {
+	efx_for_each_rx_queue(rx_queue, efx)
 		cancel_delayed_work_sync(&rx_queue->work);
-	}
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->reconfigure_work);
 	falcon_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
 		if (channel->irq)
 			synchronize_irq(channel->irq);
+	}
 	/* Stop all NAPI processing and synchronous rx refills */
 	efx_for_each_channel(channel, efx)
 	efx_oword_t reg;
 	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
 		if (channel->irq)
 			free_irq(channel->irq, channel);
+	}
 	/* ACK legacy interrupt */
 	if (FALCON_REV(efx) >= FALCON_REV_B0)
 		return 0;
 	/* Record minimum fill level */
-	if (unlikely(fill_level < rx_queue->min_fill))
+	if (unlikely(fill_level < rx_queue->min_fill)) {
 		if (fill_level)
 			rx_queue->min_fill = fill_level;
+	}
 	/* Acquire RX add lock. If this lock is contended, then a fast
 	 * fill must already be in progress (e.g. in the refill
 	base_dma = tsoh->dma_addr & PAGE_MASK;
 	p = &tx_queue->tso_headers_free;
-	while (*p != NULL)
+	while (*p != NULL) {
 		if (((unsigned long)*p & PAGE_MASK) == base_kva)
 			*p = (*p)->next;
 		else
 			p = &(*p)->next;
+	}
 	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }
 	/* Allocate a DMA-mapped header buffer. */
 	if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
-		if (tx_queue->tso_headers_free == NULL)
+		if (tx_queue->tso_headers_free == NULL) {
 			if (efx_tsoh_block_alloc(tx_queue))
 				return -1;
+		}
 		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
 		tsoh = tx_queue->tso_headers_free;
 		tx_queue->tso_headers_free = tsoh->next;
 {
 	unsigned i;
-	if (tx_queue->buffer)
+	if (tx_queue->buffer) {
 		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+	}
 	while (tx_queue->tso_headers_free != NULL)
 		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,