iwlagn: clean up TXQ indirection
author     Johannes Berg <johannes.berg@intel.com>
           Wed, 4 May 2011 14:50:44 +0000 (07:50 -0700)
committer  Wey-Yi Guy <wey-yi.w.guy@intel.com>
           Fri, 13 May 2011 17:32:04 +0000 (10:32 -0700)
All of these functions no longer need to be
accessed indirectly, since they are shared
by all AGN devices.
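
As an illustration, every call site simply drops the ops-table hop
(both forms are taken from the diff below):

    /* before: indirect call through the per-device lib ops */
    priv->cfg->ops->lib->txq_set_sched(priv, 0);

    /* after: direct call to the shared AGN implementation */
    iwlagn_txq_set_sched(priv, 0);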

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-tx.c

diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index b4c81931e136b376c8fa3ec5b1634c719591106e..61d4a11f566b4f9e9864f1109369eade4634c50a 100644
@@ -171,10 +171,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
 
 static struct iwl_lib_ops iwl1000_lib = {
        .set_hw_params = iwl1000_hw_set_hw_params,
-       .txq_set_sched = iwlagn_txq_set_sched,
-       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl_hw_txq_free_tfd,
-       .txq_init = iwl_hw_tx_queue_init,
        .rx_handler_setup = iwlagn_rx_handler_setup,
        .setup_deferred_work = iwlagn_setup_deferred_work,
        .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 9f34cd7b6399d51a43b76082aefc9e7eb2fcf718..86feec86d13009c9b6266cd7bf102b8f3195af94 100644
@@ -252,10 +252,6 @@ static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
 
 static struct iwl_lib_ops iwl2000_lib = {
        .set_hw_params = iwl2000_hw_set_hw_params,
-       .txq_set_sched = iwlagn_txq_set_sched,
-       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl_hw_txq_free_tfd,
-       .txq_init = iwl_hw_tx_queue_init,
        .rx_handler_setup = iwlagn_rx_handler_setup,
        .setup_deferred_work = iwlagn_bt_setup_deferred_work,
        .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 09751f2386cb5108a7c4b454f9722f5bd78fae0e..a70b8cfafda195a3b92e1f46d2a7e617c3faba0f 100644
@@ -339,10 +339,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
 
 static struct iwl_lib_ops iwl5000_lib = {
        .set_hw_params = iwl5000_hw_set_hw_params,
-       .txq_set_sched = iwlagn_txq_set_sched,
-       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl_hw_txq_free_tfd,
-       .txq_init = iwl_hw_tx_queue_init,
        .rx_handler_setup = iwlagn_rx_handler_setup,
        .setup_deferred_work = iwlagn_setup_deferred_work,
        .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
@@ -374,10 +370,6 @@ static struct iwl_lib_ops iwl5000_lib = {
 
 static struct iwl_lib_ops iwl5150_lib = {
        .set_hw_params = iwl5150_hw_set_hw_params,
-       .txq_set_sched = iwlagn_txq_set_sched,
-       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl_hw_txq_free_tfd,
-       .txq_init = iwl_hw_tx_queue_init,
        .rx_handler_setup = iwlagn_rx_handler_setup,
        .setup_deferred_work = iwlagn_setup_deferred_work,
        .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 860c26e4836d58eaa83db391e460840af1440c01..f8c710db6e6f90345a8a1f42e7ce1b2d8a7d85aa 100644
@@ -278,10 +278,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
 
 static struct iwl_lib_ops iwl6000_lib = {
        .set_hw_params = iwl6000_hw_set_hw_params,
-       .txq_set_sched = iwlagn_txq_set_sched,
-       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl_hw_txq_free_tfd,
-       .txq_init = iwl_hw_tx_queue_init,
        .rx_handler_setup = iwlagn_rx_handler_setup,
        .setup_deferred_work = iwlagn_setup_deferred_work,
        .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
@@ -314,10 +310,6 @@ static struct iwl_lib_ops iwl6000_lib = {
 
 static struct iwl_lib_ops iwl6030_lib = {
        .set_hw_params = iwl6000_hw_set_hw_params,
-       .txq_set_sched = iwlagn_txq_set_sched,
-       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl_hw_txq_free_tfd,
-       .txq_init = iwl_hw_tx_queue_init,
        .rx_handler_setup = iwlagn_bt_rx_handler_setup,
        .setup_deferred_work = iwlagn_bt_setup_deferred_work,
        .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 7c1becf9e2c15343ab8da99ee27da9745d3eeeec..d34e103c71cf50eec09dea0521dbcb35b8c1b270 100644
@@ -750,12 +750,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        spin_unlock(&priv->sta_lock);
 
        /* Attach buffers to TFD */
-       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                  txcmd_phys, firstlen, 1, 0);
+       iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1, 0);
        if (secondlen > 0)
-               priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                          phys_addr, secondlen,
-                                                          0, 0);
+               iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+                                            secondlen, 0, 0);
 
        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                                offsetof(struct iwl_tx_cmd, scratch);
@@ -911,7 +909,7 @@ int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
        spin_lock_irqsave(&priv->lock, flags);
 
        /* Turn off all Tx DMA fifos */
-       priv->cfg->ops->lib->txq_set_sched(priv, 0);
+       iwlagn_txq_set_sched(priv, 0);
 
        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
@@ -949,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
        spin_lock_irqsave(&priv->lock, flags);
 
        /* Turn off all Tx DMA fifos */
-       priv->cfg->ops->lib->txq_set_sched(priv, 0);
+       iwlagn_txq_set_sched(priv, 0);
 
        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
@@ -975,7 +973,7 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&priv->lock, flags);
 
-       priv->cfg->ops->lib->txq_set_sched(priv, 0);
+       iwlagn_txq_set_sched(priv, 0);
 
        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
@@ -1258,7 +1256,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 
                iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
 
-               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+               iwlagn_txq_free_tfd(priv, txq);
        }
        return nfreed;
 }
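
The iwlagn_tx_skb() hunk above shows the usual way a data frame's TFD
is assembled with the now-direct helper: the first chunk (the TX
command) is attached with reset=1, which zeroes the descriptor before
it becomes TB 0, and any payload remainder is attached with reset=0 so
it is appended as the next TB. A minimal sketch of that sequence,
assuming txcmd_phys and phys_addr are already DMA-mapped:

    /* first chunk resets the TFD and becomes transfer buffer 0 */
    iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1, 0);
    /* further chunks are appended; pad is 0 for data frames */
    if (secondlen > 0)
            iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
                                         secondlen, 0, 0);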
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index e35755c577db7a06b235065a2d28f6c6c2f4e4e8..97de5d9de67b5421582ee9c60f9f555f141f3684 100644
@@ -440,7 +440,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
                        IWL_MASK(0, priv->hw_params.max_txq_num));
 
        /* Activate all Tx DMA/FIFO channels */
-       priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
+       iwlagn_txq_set_sched(priv, IWL_MASK(0, 7));
 
        /* map queues to FIFOs */
        if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
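
IWL_MASK(0, 7) above turns on all eight TX DMA channels in one
scheduler write. The macro's definition is not part of this diff;
assuming it expands to a contiguous bitmask covering bits lo..hi, a
hypothetical equivalent would be:

    /* stand-in for IWL_MASK(lo, hi): set all bits from lo to hi */
    static inline u32 contiguous_mask(unsigned int lo, unsigned int hi)
    {
            return (~0U >> (31 - hi)) & (~0U << lo);
    }
    /* contiguous_mask(0, 7) == 0xff: enable DMA channels 0-7 */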
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 2713081ed9961f1ecee3bcdd5f0e4898110bafc3..2bb08d7e067416a6f23b12212795d89ef21ca4ea 100644
@@ -200,155 +200,6 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
        return err;
 }
 
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
-       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-       dma_addr_t addr = get_unaligned_le32(&tb->lo);
-       if (sizeof(dma_addr_t) > sizeof(u32))
-               addr |=
-               ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
-       return addr;
-}
-
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
-       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-       return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-                                 dma_addr_t addr, u16 len)
-{
-       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-       u16 hi_n_len = len << 4;
-
-       put_unaligned_le32(addr, &tb->lo);
-       if (sizeof(dma_addr_t) > sizeof(u32))
-               hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
-       tb->hi_n_len = cpu_to_le16(hi_n_len);
-
-       tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
-       return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-       struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
-       struct iwl_tfd *tfd;
-       struct pci_dev *dev = priv->pci_dev;
-       int index = txq->q.read_ptr;
-       int i;
-       int num_tbs;
-
-       tfd = &tfd_tmp[index];
-
-       /* Sanity check on number of chunks */
-       num_tbs = iwl_tfd_get_num_tbs(tfd);
-
-       if (num_tbs >= IWL_NUM_OF_TBS) {
-               IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
-               /* @todo issue fatal error, it is quite serious situation */
-               return;
-       }
-
-       /* Unmap tx_cmd */
-       if (num_tbs)
-               pci_unmap_single(dev,
-                               dma_unmap_addr(&txq->meta[index], mapping),
-                               dma_unmap_len(&txq->meta[index], len),
-                               PCI_DMA_BIDIRECTIONAL);
-
-       /* Unmap chunks, if any. */
-       for (i = 1; i < num_tbs; i++)
-               pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
-                               iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
-
-       /* free SKB */
-       if (txq->txb) {
-               struct sk_buff *skb;
-
-               skb = txq->txb[txq->q.read_ptr].skb;
-
-               /* can be called from irqs-disabled context */
-               if (skb) {
-                       dev_kfree_skb_any(skb);
-                       txq->txb[txq->q.read_ptr].skb = NULL;
-               }
-       }
-}
-
-int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-                                struct iwl_tx_queue *txq,
-                                dma_addr_t addr, u16 len,
-                                u8 reset, u8 pad)
-{
-       struct iwl_queue *q;
-       struct iwl_tfd *tfd, *tfd_tmp;
-       u32 num_tbs;
-
-       q = &txq->q;
-       tfd_tmp = (struct iwl_tfd *)txq->tfds;
-       tfd = &tfd_tmp[q->write_ptr];
-
-       if (reset)
-               memset(tfd, 0, sizeof(*tfd));
-
-       num_tbs = iwl_tfd_get_num_tbs(tfd);
-
-       /* Each TFD can point to a maximum 20 Tx buffers */
-       if (num_tbs >= IWL_NUM_OF_TBS) {
-               IWL_ERR(priv, "Error can not send more than %d chunks\n",
-                         IWL_NUM_OF_TBS);
-               return -EINVAL;
-       }
-
-       if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
-               return -EINVAL;
-
-       if (unlikely(addr & ~IWL_TX_DMA_MASK))
-               IWL_ERR(priv, "Unaligned address = %llx\n",
-                         (unsigned long long)addr);
-
-       iwl_tfd_set_tb(tfd, num_tbs, addr, len);
-
-       return 0;
-}
-
-/*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- *
- * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-int iwl_hw_tx_queue_init(struct iwl_priv *priv,
-                        struct iwl_tx_queue *txq)
-{
-       int txq_id = txq->q.id;
-
-       /* Circular buffer (TFD queue in DRAM) physical base address */
-       iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
-                            txq->q.dma_addr >> 8);
-
-       return 0;
-}
-
 static void iwl_bg_beacon_update(struct work_struct *work)
 {
        struct iwl_priv *priv =
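
The TFD helpers removed here (and re-added in iwl-tx.c below) pack a
36-bit DMA address plus a 12-bit length into each transfer buffer
entry: address bits 31:0 go into tb->lo, address bits 35:32 into the
low nibble of tb->hi_n_len, and the length into its upper 12 bits. A
self-contained round-trip sketch of that layout (hypothetical local
names; endianness and unaligned-access handling omitted):

    #include <assert.h>
    #include <stdint.h>

    struct tb { uint32_t lo; uint16_t hi_n_len; };

    static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
    {
            tb->lo = (uint32_t)addr;               /* bits 31:0 */
            tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | (len << 4));
    }

    static uint64_t tb_addr(const struct tb *tb)
    {
            return tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
    }

    int main(void)
    {
            struct tb tb;

            tb_set(&tb, 0xFBC123456ULL, 100);      /* 36-bit address */
            assert(tb_addr(&tb) == 0xFBC123456ULL);
            assert(tb.hi_n_len >> 4 == 100);       /* 12-bit length */
            return 0;
    }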
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index fe33fe8aa4185ed0a47bb668048745df7cd32e0b..269e0c47927b3a2a7fb6ccc3e84ca6cbe148db0d 100644
@@ -191,12 +191,10 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
 void iwl_setup_rx_handlers(struct iwl_priv *priv);
 
 /* tx */
-void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
                                 struct iwl_tx_queue *txq,
                                 dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl_hw_tx_queue_init(struct iwl_priv *priv,
-                        struct iwl_tx_queue *txq);
 void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
                              struct ieee80211_tx_info *info);
 int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 5b5b0cce4a54e7c85d89fb5e45e4b9c7c2853b97..3bb76f6ea41017448c5cc5ce99851c967d66dafe 100644
@@ -127,16 +127,6 @@ struct iwl_temp_ops {
 struct iwl_lib_ops {
        /* set hw dependent parameters */
        int (*set_hw_params)(struct iwl_priv *priv);
-       /* Handling TX */
-       void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
-       int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
-                                    struct iwl_tx_queue *txq,
-                                    dma_addr_t addr,
-                                    u16 len, u8 reset, u8 pad);
-       void (*txq_free_tfd)(struct iwl_priv *priv,
-                            struct iwl_tx_queue *txq);
-       int (*txq_init)(struct iwl_priv *priv,
-                       struct iwl_tx_queue *txq);
        /* setup Rx handler */
        void (*rx_handler_setup)(struct iwl_priv *priv);
        /* setup deferred work */
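
After this removal, iwl_lib_ops keeps only hooks that genuinely differ
between devices. Going by the fields visible in this patch's hunks (the
full struct has more members than the diff context shows), a device's
ops table now reads:

    static struct iwl_lib_ops iwl1000_lib = {
            .set_hw_params = iwl1000_hw_set_hw_params,
            .rx_handler_setup = iwlagn_rx_handler_setup,
            .setup_deferred_work = iwlagn_setup_deferred_work,
            .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
            /* ... remaining per-device hooks ... */
    };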
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 7843195efb0519524e919c49da8531e83c7d68b7..302284bef961c17a86b363b75c85a2c2d69ab243 100644
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <net/mac80211.h>
 #include "iwl-eeprom.h"
+#include "iwl-agn.h"
 #include "iwl-dev.h"
 #include "iwl-core.h"
 #include "iwl-sta.h"
@@ -85,6 +86,154 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
        txq->need_update = 0;
 }
 
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+       dma_addr_t addr = get_unaligned_le32(&tb->lo);
+       if (sizeof(dma_addr_t) > sizeof(u32))
+               addr |=
+               ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+       return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+       return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+                                 dma_addr_t addr, u16 len)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+       u16 hi_n_len = len << 4;
+
+       put_unaligned_le32(addr, &tb->lo);
+       if (sizeof(dma_addr_t) > sizeof(u32))
+               hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+       tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+       tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+       return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+       struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
+       struct iwl_tfd *tfd;
+       struct pci_dev *dev = priv->pci_dev;
+       int index = txq->q.read_ptr;
+       int i;
+       int num_tbs;
+
+       tfd = &tfd_tmp[index];
+
+       /* Sanity check on number of chunks */
+       num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+       if (num_tbs >= IWL_NUM_OF_TBS) {
+               IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+               /* @todo issue fatal error, it is quite serious situation */
+               return;
+       }
+
+       /* Unmap tx_cmd */
+       if (num_tbs)
+               pci_unmap_single(dev,
+                               dma_unmap_addr(&txq->meta[index], mapping),
+                               dma_unmap_len(&txq->meta[index], len),
+                               PCI_DMA_BIDIRECTIONAL);
+
+       /* Unmap chunks, if any. */
+       for (i = 1; i < num_tbs; i++)
+               pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+                               iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+
+       /* free SKB */
+       if (txq->txb) {
+               struct sk_buff *skb;
+
+               skb = txq->txb[txq->q.read_ptr].skb;
+
+               /* can be called from irqs-disabled context */
+               if (skb) {
+                       dev_kfree_skb_any(skb);
+                       txq->txb[txq->q.read_ptr].skb = NULL;
+               }
+       }
+}
+
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+                                struct iwl_tx_queue *txq,
+                                dma_addr_t addr, u16 len,
+                                u8 reset, u8 pad)
+{
+       struct iwl_queue *q;
+       struct iwl_tfd *tfd, *tfd_tmp;
+       u32 num_tbs;
+
+       q = &txq->q;
+       tfd_tmp = (struct iwl_tfd *)txq->tfds;
+       tfd = &tfd_tmp[q->write_ptr];
+
+       if (reset)
+               memset(tfd, 0, sizeof(*tfd));
+
+       num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+       /* Each TFD can point to a maximum 20 Tx buffers */
+       if (num_tbs >= IWL_NUM_OF_TBS) {
+               IWL_ERR(priv, "Error can not send more than %d chunks\n",
+                         IWL_NUM_OF_TBS);
+               return -EINVAL;
+       }
+
+       if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+               return -EINVAL;
+
+       if (unlikely(addr & ~IWL_TX_DMA_MASK))
+               IWL_ERR(priv, "Unaligned address = %llx\n",
+                         (unsigned long long)addr);
+
+       iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+       return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+       int txq_id = txq->q.id;
+
+       /* Circular buffer (TFD queue in DRAM) physical base address */
+       iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+                            txq->q.dma_addr >> 8);
+
+       return 0;
+}
+
 /**
  * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
  */
@@ -97,7 +246,7 @@ void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
                return;
 
         while (q->write_ptr != q->read_ptr) {
-               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+               iwlagn_txq_free_tfd(priv, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
 }
@@ -391,7 +540,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                return ret;
 
        /* Tell device where to find queue */
-       priv->cfg->ops->lib->txq_init(priv, txq);
+       iwlagn_tx_queue_init(priv, txq);
 
        return 0;
 err:
@@ -420,7 +569,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
        iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 
        /* Tell device where to find queue */
-       priv->cfg->ops->lib->txq_init(priv, txq);
+       iwlagn_tx_queue_init(priv, txq);
 }
 
 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
@@ -553,9 +702,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
        trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
 
-       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                  phys_addr, fix_size, 1,
-                                                  U32_PAD(cmd->len[0]));
+       iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, fix_size, 1,
+                                    U32_PAD(cmd->len[0]));
 
        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
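
A side effect of the move is that iwl_hw_tx_queue_init() is renamed
iwlagn_tx_queue_init() and made static: both of its callers,
iwl_tx_queue_init() and iwl_tx_queue_reset(), live in iwl-tx.c. The
single register write it performs tells the device where the queue's
TFD circular buffer sits in DRAM; assuming the base address is aligned
so its low 8 bits are zero, the >> 8 stores it losslessly in 256-byte
units:

    /* CBBC register takes the DRAM base address in 256-byte units */
    iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                       txq->q.dma_addr >> 8);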