/* Tx DMA processing queues */
struct iwl_tx_queue *txq;
unsigned long txq_ctx_active_msk;
- struct iwl_dma_ptr kw; /* keep warm address */
/* counts mgmt, ctl, and data packets */
struct traffic_stats tx_stats;
* @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
+ * @kw: keep warm address
*/
struct iwl_trans_pcie {
struct iwl_rx_queue rxq;
u32 inta_mask;
u32 scd_base_addr;
struct iwl_dma_ptr scd_bc_tbls;
+ struct iwl_dma_ptr kw;
};
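
A minimal sketch of how code reaches the field in its new home, assuming the IWL_TRANS_GET_PCIE_TRANS() macro below returns the PCIe-private area hanging off the generic transport; the helper name here is illustrative only, not part of the patch:

static inline dma_addr_t example_trans_kw_dma(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* the keep-warm DMA address now lives in the PCIe-private struct;
	 * FH_KW_MEM_ADDR_REG is programmed with this value shifted right
	 * by 4, i.e. in 16-byte units (see the tx_start hunk at the end
	 * of this excerpt) */
	return trans_pcie->kw.dma;
}
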
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
kfree(priv->txq);
priv->txq = NULL;
- iwlagn_free_dma_ptr(trans, &priv->kw);
+ iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
}
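
For context, a minimal sketch of what the DMA-pointer helpers used in the hunks above and below presumably wrap, assuming struct iwl_dma_ptr carries the CPU address, bus address and size; the real iwlagn_alloc_dma_ptr()/iwlagn_free_dma_ptr() take the transport rather than a struct device and may differ in detail:

static int example_alloc_dma_ptr(struct device *dev,
				 struct iwl_dma_ptr *ptr, size_t size)
{
	/* one coherent buffer; ptr->dma is the bus address that gets
	 * programmed into device registers such as FH_KW_MEM_ADDR_REG */
	ptr->addr = dma_alloc_coherent(dev, size, &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void example_free_dma_ptr(struct device *dev, struct iwl_dma_ptr *ptr)
{
	if (ptr->addr)
		dma_free_coherent(dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
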
/* Alloc keep-warm buffer */
- ret = iwlagn_alloc_dma_ptr(trans, &priv->kw, IWL_KW_SIZE);
+ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
if (ret) {
IWL_ERR(trans, "Keep Warm allocation failed\n");
goto error;
unsigned long flags;
bool alloc = false;
struct iwl_priv *priv = priv(trans);
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
if (!priv->txq) {
ret = iwl_trans_tx_alloc(trans);
iwl_write_prph(priv, SCD_TXFACT, 0);
/* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, trans_pcie->kw.dma >> 4);
spin_unlock_irqrestore(&trans->shrd->lock, flags);