	int i;
	int counter;
-	/* classify bd */
-	if (txq->q.id == IWL_CMD_QUEUE_NUM)
-		/* nothing to cleanup after for host commands */
-		return;
-
	/* sanity check */
	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
	if (counter > NUM_TFD_CHUNKS) {
	/* Tx queues */
	for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++)
-		iwl_tx_queue_free(priv, txq_id);
+		if (txq_id == IWL_CMD_QUEUE_NUM)
+			iwl_cmd_queue_free(priv);
+		else
+			iwl_tx_queue_free(priv, txq_id);
+
}
void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
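For readability, the resulting 3945 Tx-queue teardown after the hunk above would read roughly as sketched below. The enclosing function name and the txq_id local are assumptions here, since the hunk header naming the function is not part of this excerpt.

    /* Sketch, not part of the patch: the 3945 teardown loop with the change
     * applied. Data queues are still released per queue, while the host
     * command queue now goes through the shared iwlcore helper. */
    void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
    {
    	int txq_id;

    	/* Tx queues, including the host command queue */
    	for (txq_id = 0; txq_id <= priv->hw_params.max_txq_num; txq_id++)
    		if (txq_id == IWL_CMD_QUEUE_NUM)
    			iwl_cmd_queue_free(priv);
    		else
    			iwl_tx_queue_free(priv, txq_id);
    }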
* RX
******************************************************/
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
* Free all buffers.
* 0-fill, but do not free "txq" descriptor structure.
*/
-static void iwl_cmd_queue_free(struct iwl_priv *priv)
+void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
-		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
+		pci_free_consistent(dev, priv->hw_params.tfd_size *
				txq->q.n_bd, txq->tfds, txq->q.dma_addr);
	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
+EXPORT_SYMBOL(iwl_cmd_queue_free);
+
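The switch from sizeof(struct iwl_tfd) to priv->hw_params.tfd_size matters because this helper is now shared code: the 3945 and 4965/AGN families use different TFD layouts, so the size of the freed DMA ring has to come from per-hardware parameters rather than a fixed struct size. A minimal sketch of how each family would populate that field during hw_params setup follows; the exact placement and the iwl3945_tfd name are assumptions, not shown in this excerpt.

    /* Sketch, assumed placement in each family's hw_params setup:
     * record the per-hardware TFD element size so shared code such as
     * iwl_cmd_queue_free() can size its DMA ring correctly. */

    /* 3945 setup path */
    priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);

    /* 4965/AGN setup path */
    priv->hw_params.tfd_size = sizeof(struct iwl_tfd);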
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
*